be2net: Use new F/W mailbox cmd to manipulate interrupts.
[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

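/* Toggle the host-interrupt enable bit in the PCICFG MEMBAR control
 * register. This is the legacy, register-based way of masking/unmasking
 * interrupts; the current state is read first so the config write is
 * skipped when no change is needed.
 */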
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
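/* Enable/disable host interrupts. Per the "Use new F/W mailbox cmd to
 * manipulate interrupts" change, the firmware mailbox command
 * (be_cmd_intr_set) is tried first; if it fails, fall back to the direct
 * PCICFG register write in be_reg_intr_set().
 */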
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
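/* Doorbell helpers: each ring is kicked with a single 32-bit write to its
 * doorbell offset. The low bits carry the ring id; the remaining bits
 * carry the number of entries posted/popped plus arm/clear flags. The
 * wmb() orders descriptor writes before the doorbell write.
 */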
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
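/* Change the primary MAC: the new address is programmed (be_cmd_pmac_add)
 * before the currently active one is deleted, so the interface never runs
 * without a valid unicast filter. On BE chips a VF cannot change the MAC
 * assigned by the PF and only syncs netdev->dev_addr.
 */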
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
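/* The stats command layout differs per ASIC generation: BE2 returns only
 * the v0 format, BE3/Skyhawk return v1, and Lancer reports per-port
 * (pport) stats. The helpers below pick the right view of the DMA'ed
 * stats buffer.
 */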
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

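/* Example: if *acc == 0x0001FFF0 (one prior wrap in the high word, low
 * word 0xFFF0) and the HW counter now reads val = 0x0002, then
 * val < lo(*acc) flags a 16-bit wrap, so 65536 is added:
 * newacc = 0x00010000 + 0x0002 + 65536 = 0x00020002.
 */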
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

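/* Each xmit uses one header WRB plus one WRB per data fragment; except on
 * Lancer, the total per request must be even, so an extra dummy WRB is
 * added when the count would otherwise be odd.
 */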
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

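/* If the 802.1p priority requested by the stack is not available on this
 * function (not set in vlan_prio_bmap), substitute the FW-recommended
 * priority while keeping the VLAN id bits intact.
 */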
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

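/* Map the skb (linear part plus page frags) and fill one WRB per mapped
 * piece, headed by the header WRB. On a DMA mapping failure everything
 * mapped so far is unwound and 0 is returned so the caller drops the skb.
 */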
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

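/* Sync the HW RX filter with the netdev state: promiscuous and
 * all-multicast modes are handled first, then the unicast and multicast
 * lists; when a list exceeds the HW filter capacity the corresponding
 * promiscuous mode is used as a fallback.
 */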
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

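/* Adaptive interrupt coalescing: once a second, derive the RX pkts/sec
 * rate and map it to an EQ delay (eqd = (rx_pps / 110000) << 3), clamped
 * to the per-EQ min/max; the new value is pushed to FW only when it
 * actually changes.
 */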
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

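/* Pop the next RX completion: the valid bit sits at the same offset in
 * the v0 and v1 layouts, so it can be tested before choosing the parser
 * (be3_native selects v1); the entry is zeroed after parsing so it is
 * never processed twice.
 */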
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

1561/*
1562 * Allocate a page, split it to fragments of size rx_frag_size and post as
1563 * receive buffers to BE
1564 */
1829b086 1565static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1566{
3abcdeda 1567 struct be_adapter *adapter = rxo->adapter;
26d92f92 1568 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1569 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1570 struct page *pagep = NULL;
1571 struct be_eth_rx_d *rxd;
1572 u64 page_dmaaddr = 0, frag_dmaaddr;
1573 u32 posted, page_offset = 0;
1574
3abcdeda 1575 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1576 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1577 if (!pagep) {
1829b086 1578 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1579 if (unlikely(!pagep)) {
ac124ff9 1580 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1581 break;
1582 }
2b7bcebf
IV
1583 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1584 0, adapter->big_page_size,
1585 DMA_FROM_DEVICE);
6b7c5b94
SP
1586 page_info->page_offset = 0;
1587 } else {
1588 get_page(pagep);
1589 page_info->page_offset = page_offset + rx_frag_size;
1590 }
1591 page_offset = page_info->page_offset;
1592 page_info->page = pagep;
fac6da5b 1593 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94 1594 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1595
1596 rxd = queue_head_node(rxq);
1597 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1598 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94 1599
1600 /* Any space left in the current big page for another frag? */
1601 if ((page_offset + rx_frag_size + rx_frag_size) >
1602 adapter->big_page_size) {
1603 pagep = NULL;
1604 page_info->last_page_user = true;
1605 }
26d92f92 1606
1607 prev_page_info = page_info;
1608 queue_head_inc(rxq);
10ef9ab4 1609 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1610 }
1611 if (pagep)
26d92f92 1612 prev_page_info->last_page_user = true;
6b7c5b94 1613
1614 if (posted) {
6b7c5b94 1615 atomic_add(posted, &rxq->used);
8788fdc2 1616 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11 1617 } else if (atomic_read(&rxq->used) == 0) {
1618 /* Let be_worker replenish when memory is available */
3abcdeda 1619 rxo->rx_post_starved = true;
6b7c5b94 1620 }
6b7c5b94 1621 }
1622
5fb379ee 1623static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1624{
6b7c5b94 1625 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1626
1627 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1628 return NULL;
1629
f3eb62d2 1630 rmb();
6b7c5b94 1631 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1632
1633 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1634
1635 queue_tail_inc(tx_cq);
1636 return txcp;
1637}
1638
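/* Reclaims the WRBs of one transmitted skb: starting at the queue tail the
 * header WRB is skipped (but counted), each data WRB up to last_index is
 * DMA-unmapped, the skb is freed and the number of WRBs consumed returned. */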
3c8def97 1639 static u16 be_tx_compl_process(struct be_adapter *adapter,
1640 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1641{
3c8def97 1642 struct be_queue_info *txq = &txo->q;
a73b796e 1643 struct be_eth_wrb *wrb;
3c8def97 1644 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1645 struct sk_buff *sent_skb;
ec43b1a6 1646 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1647 bool unmap_skb_hdr = true;
6b7c5b94 1648
ec43b1a6 1649 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1650 BUG_ON(!sent_skb);
ec43b1a6 1651 sent_skbs[txq->tail] = NULL;
1652
1653 /* skip header wrb */
a73b796e 1654 queue_tail_inc(txq);
6b7c5b94 1655
ec43b1a6 1656 do {
6b7c5b94 1657 cur_index = txq->tail;
a73b796e 1658 wrb = queue_tail_node(txq);
2b7bcebf 1659 unmap_tx_frag(&adapter->pdev->dev, wrb,
1660 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6 1661 unmap_skb_hdr = false;
1662
6b7c5b94 1663 num_wrbs++;
1664 queue_tail_inc(txq);
ec43b1a6 1665 } while (cur_index != last_index);
6b7c5b94 1666
6b7c5b94 1667 kfree_skb(sent_skb);
4d586b82 1668 return num_wrbs;
6b7c5b94 1669 }
1670
10ef9ab4 1671 /* Return the number of events in the event queue */
1672static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1673{
10ef9ab4 1674 struct be_eq_entry *eqe;
1675 int num = 0;
859b1e4e 1676
10ef9ab4 1677 do {
1678 eqe = queue_tail_node(&eqo->q);
1679 if (eqe->evt == 0)
1680 break;
859b1e4e 1681
10ef9ab4 1682 rmb();
1683 eqe->evt = 0;
1684 num++;
1685 queue_tail_inc(&eqo->q);
1686 } while (true);
1687
1688 return num;
859b1e4e 1689 }
1690
10ef9ab4 1691 /* Leaves the EQ in a disarmed state */
1692static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1693{
10ef9ab4 1694 int num = events_get(eqo);
859b1e4e 1695
10ef9ab4 1696 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e 1697 }
1698
10ef9ab4 1699static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94 1700 {
1701 struct be_rx_page_info *page_info;
3abcdeda 1702 struct be_queue_info *rxq = &rxo->q;
1703 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1704 struct be_rx_compl_info *rxcp;
d23e946c 1705 struct be_adapter *adapter = rxo->adapter;
1706 int flush_wait = 0;
6b7c5b94 1707 u16 tail;
1708
d23e946c 1709 /* Consume pending rx completions.
1710 * Wait for the flush completion (identified by zero num_rcvd)
1711 * to arrive. Keep notifying the CQ even when there are no more
1712 * CQ entries, so that HW can flush partially coalesced CQ entries.
1713 * In Lancer, there is no need to wait for the flush compl.
1714 */
1715 for (;;) {
1716 rxcp = be_rx_compl_get(rxo);
1717 if (rxcp == NULL) {
1718 if (lancer_chip(adapter))
1719 break;
1720
1721 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1722 dev_warn(&adapter->pdev->dev,
1723 "did not receive flush compl\n");
1724 break;
1725 }
1726 be_cq_notify(adapter, rx_cq->id, true, 0);
1727 mdelay(1);
1728 } else {
1729 be_rx_compl_discard(rxo, rxcp);
1730 be_cq_notify(adapter, rx_cq->id, true, 1);
1731 if (rxcp->num_rcvd == 0)
1732 break;
1733 }
6b7c5b94
SP
1734 }
1735
d23e946c
SP
1736 /* After cleanup, leave the CQ in unarmed state */
1737 be_cq_notify(adapter, rx_cq->id, false, 0);
1738
1739 /* Then free posted rx buffers that were not used */
6b7c5b94 1740 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1741 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1742 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1743 put_page(page_info->page);
1744 memset(page_info, 0, sizeof(*page_info));
1745 }
1746 BUG_ON(atomic_read(&rxq->used));
482c9e79 1747 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1748}
1749
0ae57bb3 1750static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1751{
0ae57bb3
SP
1752 struct be_tx_obj *txo;
1753 struct be_queue_info *txq;
a8e9179a 1754 struct be_eth_tx_compl *txcp;
4d586b82 1755 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1756 struct sk_buff *sent_skb;
1757 bool dummy_wrb;
0ae57bb3 1758 int i, pending_txqs;
a8e9179a
SP
1759
1760 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1761 do {
0ae57bb3
SP
1762 pending_txqs = adapter->num_tx_qs;
1763
1764 for_all_tx_queues(adapter, txo, i) {
1765 txq = &txo->q;
1766 while ((txcp = be_tx_compl_get(&txo->cq))) {
1767 end_idx =
1768 AMAP_GET_BITS(struct amap_eth_tx_compl,
1769 wrb_index, txcp);
1770 num_wrbs += be_tx_compl_process(adapter, txo,
1771 end_idx);
1772 cmpl++;
1773 }
1774 if (cmpl) {
1775 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1776 atomic_sub(num_wrbs, &txq->used);
1777 cmpl = 0;
1778 num_wrbs = 0;
1779 }
1780 if (atomic_read(&txq->used) == 0)
1781 pending_txqs--;
a8e9179a
SP
1782 }
1783
0ae57bb3 1784 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1785 break;
1786
1787 mdelay(1);
1788 } while (true);
1789
0ae57bb3
SP
1790 for_all_tx_queues(adapter, txo, i) {
1791 txq = &txo->q;
1792 if (atomic_read(&txq->used))
1793 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1794 atomic_read(&txq->used));
1795
1796 /* free posted tx for which compls will never arrive */
1797 while (atomic_read(&txq->used)) {
1798 sent_skb = txo->sent_skb_list[txq->tail];
1799 end_idx = txq->tail;
1800 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1801 &dummy_wrb);
1802 index_adv(&end_idx, num_wrbs - 1, txq->len);
1803 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1804 atomic_sub(num_wrbs, &txq->used);
1805 }
b03388d6 1806 }
6b7c5b94
SP
1807}
1808
10ef9ab4
SP
1809static void be_evt_queues_destroy(struct be_adapter *adapter)
1810{
1811 struct be_eq_obj *eqo;
1812 int i;
1813
1814 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1815 if (eqo->q.created) {
1816 be_eq_clean(eqo);
10ef9ab4 1817 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1818 }
10ef9ab4
SP
1819 be_queue_free(adapter, &eqo->q);
1820 }
1821}
1822
1823static int be_evt_queues_create(struct be_adapter *adapter)
1824{
1825 struct be_queue_info *eq;
1826 struct be_eq_obj *eqo;
1827 int i, rc;
1828
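 /* One EQ is created per interrupt vector; adaptive EQ-delay (AIC)
 * is enabled for each, bounded by BE_MAX_EQD. */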
1829 adapter->num_evt_qs = num_irqs(adapter);
1830
1831 for_all_evt_queues(adapter, eqo, i) {
1832 eqo->adapter = adapter;
1833 eqo->tx_budget = BE_TX_BUDGET;
1834 eqo->idx = i;
1835 eqo->max_eqd = BE_MAX_EQD;
1836 eqo->enable_aic = true;
1837
1838 eq = &eqo->q;
1839 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1840 sizeof(struct be_eq_entry));
1841 if (rc)
1842 return rc;
1843
1844 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1845 if (rc)
1846 return rc;
1847 }
1cfafab9 1848 return 0;
10ef9ab4
SP
1849}
1850
5fb379ee
SP
1851static void be_mcc_queues_destroy(struct be_adapter *adapter)
1852{
1853 struct be_queue_info *q;
5fb379ee 1854
8788fdc2 1855 q = &adapter->mcc_obj.q;
5fb379ee 1856 if (q->created)
8788fdc2 1857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1858 be_queue_free(adapter, q);
1859
8788fdc2 1860 q = &adapter->mcc_obj.cq;
5fb379ee 1861 if (q->created)
8788fdc2 1862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1863 be_queue_free(adapter, q);
1864}
1865
1866/* Must be called only after TX qs are created as MCC shares TX EQ */
1867static int be_mcc_queues_create(struct be_adapter *adapter)
1868{
1869 struct be_queue_info *q, *cq;
5fb379ee 1870
8788fdc2 1871 cq = &adapter->mcc_obj.cq;
5fb379ee 1872 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1873 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1874 goto err;
1875
10ef9ab4
SP
1876 /* Use the default EQ for MCC completions */
1877 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1878 goto mcc_cq_free;
1879
8788fdc2 1880 q = &adapter->mcc_obj.q;
5fb379ee
SP
1881 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1882 goto mcc_cq_destroy;
1883
8788fdc2 1884 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1885 goto mcc_q_free;
1886
1887 return 0;
1888
1889mcc_q_free:
1890 be_queue_free(adapter, q);
1891mcc_cq_destroy:
8788fdc2 1892 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1893mcc_cq_free:
1894 be_queue_free(adapter, cq);
1895err:
1896 return -1;
1897}
1898
6b7c5b94
SP
1899static void be_tx_queues_destroy(struct be_adapter *adapter)
1900{
1901 struct be_queue_info *q;
3c8def97
SP
1902 struct be_tx_obj *txo;
1903 u8 i;
6b7c5b94 1904
3c8def97
SP
1905 for_all_tx_queues(adapter, txo, i) {
1906 q = &txo->q;
1907 if (q->created)
1908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1909 be_queue_free(adapter, q);
6b7c5b94 1910
3c8def97
SP
1911 q = &txo->cq;
1912 if (q->created)
1913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914 be_queue_free(adapter, q);
1915 }
6b7c5b94
SP
1916}
1917
dafc0fe3
SP
1918static int be_num_txqs_want(struct be_adapter *adapter)
1919{
abb93951
PR
1920 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1921 be_is_mc(adapter) ||
1922 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1923 BE2_chip(adapter))
dafc0fe3
SP
1924 return 1;
1925 else
abb93951 1926 return adapter->max_tx_queues;
dafc0fe3
SP
1927}
1928
10ef9ab4 1929static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1930{
10ef9ab4
SP
1931 struct be_queue_info *cq, *eq;
1932 int status;
3c8def97
SP
1933 struct be_tx_obj *txo;
1934 u8 i;
6b7c5b94 1935
dafc0fe3 1936 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1937 if (adapter->num_tx_qs != MAX_TX_QS) {
1938 rtnl_lock();
dafc0fe3
SP
1939 netif_set_real_num_tx_queues(adapter->netdev,
1940 adapter->num_tx_qs);
3bb62f4f
PR
1941 rtnl_unlock();
1942 }
dafc0fe3 1943
10ef9ab4
SP
1944 for_all_tx_queues(adapter, txo, i) {
1945 cq = &txo->cq;
1946 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1947 sizeof(struct be_eth_tx_compl));
1948 if (status)
1949 return status;
3c8def97 1950
10ef9ab4 1951 /* If num_evt_qs is less than num_tx_qs, then more than
1952 * one txq shares an eq
1953 */
1954 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1955 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1956 if (status)
1957 return status;
1958 }
1959 return 0;
1960}
6b7c5b94 1961
10ef9ab4
SP
1962static int be_tx_qs_create(struct be_adapter *adapter)
1963{
1964 struct be_tx_obj *txo;
1965 int i, status;
fe6d2a38 1966
3c8def97 1967 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1968 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1969 sizeof(struct be_eth_wrb));
1970 if (status)
1971 return status;
6b7c5b94 1972
10ef9ab4
SP
1973 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1974 if (status)
1975 return status;
3c8def97 1976 }
6b7c5b94 1977
d379142b
SP
1978 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1979 adapter->num_tx_qs);
10ef9ab4 1980 return 0;
6b7c5b94
SP
1981}
1982
10ef9ab4 1983static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1984{
1985 struct be_queue_info *q;
3abcdeda
SP
1986 struct be_rx_obj *rxo;
1987 int i;
1988
1989 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1990 q = &rxo->cq;
1991 if (q->created)
1992 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1993 be_queue_free(adapter, q);
ac6a0c4a
SP
1994 }
1995}
1996
10ef9ab4 1997static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1998{
10ef9ab4 1999 struct be_queue_info *eq, *cq;
3abcdeda
SP
2000 struct be_rx_obj *rxo;
2001 int rc, i;
6b7c5b94 2002
10ef9ab4 2003 /* We'll create as many RSS rings as there are irqs.
2004 * But when there's only one irq, there's no use creating RSS rings.
2005 */
2006 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2007 num_irqs(adapter) + 1 : 1;
7f640062
SP
2008 if (adapter->num_rx_qs != MAX_RX_QS) {
2009 rtnl_lock();
2010 netif_set_real_num_rx_queues(adapter->netdev,
2011 adapter->num_rx_qs);
2012 rtnl_unlock();
2013 }
ac6a0c4a 2014
6b7c5b94 2015 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2016 for_all_rx_queues(adapter, rxo, i) {
2017 rxo->adapter = adapter;
3abcdeda
SP
2018 cq = &rxo->cq;
2019 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2020 sizeof(struct be_eth_rx_compl));
2021 if (rc)
10ef9ab4 2022 return rc;
3abcdeda 2023
10ef9ab4
SP
2024 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2025 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2026 if (rc)
10ef9ab4 2027 return rc;
3abcdeda 2028 }
6b7c5b94 2029
d379142b
SP
2030 dev_info(&adapter->pdev->dev,
2031 "created %d RSS queue(s) and 1 default RX queue\n",
2032 adapter->num_rx_qs - 1);
10ef9ab4 2033 return 0;
b628bde2
SP
2034}
2035
6b7c5b94
SP
2036static irqreturn_t be_intx(int irq, void *dev)
2037{
e49cc34f
SP
2038 struct be_eq_obj *eqo = dev;
2039 struct be_adapter *adapter = eqo->adapter;
2040 int num_evts = 0;
6b7c5b94 2041
d0b9cec3 2042 /* An IRQ is not expected when NAPI is scheduled, as the EQ
2043 * will not be armed.
2044 * But this can happen on Lancer INTx, where it takes a while to
2045 * de-assert INTx, or on BE2, where occasionally an interrupt may
2046 * be raised even when the EQ is unarmed.
2047 * If NAPI is already scheduled, counting & notifying
2048 * events will orphan them.
e49cc34f 2049 */
d0b9cec3 2050 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2051 num_evts = events_get(eqo);
d0b9cec3
SP
2052 __napi_schedule(&eqo->napi);
2053 if (num_evts)
2054 eqo->spurious_intr = 0;
2055 }
2056 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2057
d0b9cec3 2058 /* Return IRQ_HANDLED only for the first spurious intr
2059 * after a valid intr, to stop the kernel from branding
2060 * this irq as a bad one!
e49cc34f 2061 */
d0b9cec3
SP
2062 if (num_evts || eqo->spurious_intr++ == 0)
2063 return IRQ_HANDLED;
2064 else
2065 return IRQ_NONE;
6b7c5b94
SP
2066}
2067
10ef9ab4 2068static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2069{
10ef9ab4 2070 struct be_eq_obj *eqo = dev;
6b7c5b94 2071
0b545a62
SP
2072 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2073 napi_schedule(&eqo->napi);
6b7c5b94
SP
2074 return IRQ_HANDLED;
2075}
2076
2e588f84 2077static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2078{
2e588f84 2079 return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
2080}
2081
10ef9ab4
SP
2082static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2083 int budget)
6b7c5b94 2084{
3abcdeda
SP
2085 struct be_adapter *adapter = rxo->adapter;
2086 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2087 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2088 u32 work_done;
2089
2090 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2091 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2092 if (!rxcp)
2093 break;
2094
12004ae9 2095 /* Is it a flush compl that has no data? */
2096 if (unlikely(rxcp->num_rcvd == 0))
2097 goto loop_continue;
2098
2099 /* Discard compl with partial DMA Lancer B0 */
2100 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2101 be_rx_compl_discard(rxo, rxcp);
12004ae9 2102 goto loop_continue;
2103 }
2104
2105 /* On BE drop pkts that arrive due to imperfect filtering in
2106 * promiscuous mode on some SKUs
2107 */
2108 if (unlikely(rxcp->port != adapter->port_num &&
2109 !lancer_chip(adapter))) {
10ef9ab4 2110 be_rx_compl_discard(rxo, rxcp);
12004ae9 2111 goto loop_continue;
64642811 2112 }
009dd872 2113
12004ae9 2114 if (do_gro(rxcp))
10ef9ab4 2115 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2116 else
10ef9ab4 2117 be_rx_compl_process(rxo, rxcp);
12004ae9 2118loop_continue:
2e588f84 2119 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2120 }
2121
10ef9ab4
SP
2122 if (work_done) {
2123 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2124
10ef9ab4
SP
2125 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2126 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2127 }
10ef9ab4 2128
6b7c5b94
SP
2129 return work_done;
2130}
2131
10ef9ab4
SP
2132static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2133 int budget, int idx)
6b7c5b94 2134{
6b7c5b94 2135 struct be_eth_tx_compl *txcp;
10ef9ab4 2136 int num_wrbs = 0, work_done;
3c8def97 2137
10ef9ab4
SP
2138 for (work_done = 0; work_done < budget; work_done++) {
2139 txcp = be_tx_compl_get(&txo->cq);
2140 if (!txcp)
2141 break;
2142 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2143 AMAP_GET_BITS(struct amap_eth_tx_compl,
2144 wrb_index, txcp));
10ef9ab4 2145 }
6b7c5b94 2146
10ef9ab4
SP
2147 if (work_done) {
2148 be_cq_notify(adapter, txo->cq.id, true, work_done);
2149 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2150
10ef9ab4 2151 /* As Tx wrbs have been freed up, wake up the netdev queue
2152 * if it was stopped due to a lack of tx wrbs. */
2153 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2154 atomic_read(&txo->q.used) < txo->q.len / 2) {
2155 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2156 }
10ef9ab4
SP
2157
2158 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2159 tx_stats(txo)->tx_compl += work_done;
2160 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2161 }
10ef9ab4
SP
2162 return (work_done < budget); /* Done */
2163}
6b7c5b94 2164
10ef9ab4
SP
2165int be_poll(struct napi_struct *napi, int budget)
2166{
2167 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2168 struct be_adapter *adapter = eqo->adapter;
0b545a62 2169 int max_work = 0, work, i, num_evts;
10ef9ab4 2170 bool tx_done;
f31e50a8 2171
0b545a62
SP
2172 num_evts = events_get(eqo);
2173
10ef9ab4
SP
2174 /* Process all TXQs serviced by this EQ */
2175 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2176 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2177 eqo->tx_budget, i);
2178 if (!tx_done)
2179 max_work = budget;
f31e50a8
SP
2180 }
2181
10ef9ab4 2182 /* This loop will iterate twice for EQ0, in which
2183 * completions of the last RXQ (the default one) are also processed.
2184 * For other EQs the loop iterates only once.
2185 */
2186 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2187 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2188 max_work = max(work, max_work);
2189 }
6b7c5b94 2190
10ef9ab4
SP
2191 if (is_mcc_eqo(eqo))
2192 be_process_mcc(adapter);
93c86700 2193
10ef9ab4
SP
2194 if (max_work < budget) {
2195 napi_complete(napi);
0b545a62 2196 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2197 } else {
2198 /* As we'll continue in polling mode, count and clear events */
0b545a62 2199 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2200 }
10ef9ab4 2201 return max_work;
6b7c5b94
SP
2202}
2203
f67ef7ba 2204void be_detect_error(struct be_adapter *adapter)
7c185276 2205{
e1cfb67a
PR
2206 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2207 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2208 u32 i;
2209
d23e946c 2210 if (be_hw_error(adapter))
72f02485
SP
2211 return;
2212
e1cfb67a
PR
2213 if (lancer_chip(adapter)) {
2214 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2215 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2216 sliport_err1 = ioread32(adapter->db +
2217 SLIPORT_ERROR1_OFFSET);
2218 sliport_err2 = ioread32(adapter->db +
2219 SLIPORT_ERROR2_OFFSET);
2220 }
2221 } else {
2222 pci_read_config_dword(adapter->pdev,
2223 PCICFG_UE_STATUS_LOW, &ue_lo);
2224 pci_read_config_dword(adapter->pdev,
2225 PCICFG_UE_STATUS_HIGH, &ue_hi);
2226 pci_read_config_dword(adapter->pdev,
2227 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2228 pci_read_config_dword(adapter->pdev,
2229 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2230
f67ef7ba
PR
2231 ue_lo = (ue_lo & ~ue_lo_mask);
2232 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2233 }
7c185276 2234
1451ae6e 2235 /* On certain platforms BE hardware can indicate spurious UEs.
2236 * In case of a real UE the h/w will stop working on its own anyway,
2237 * so hw_error is not set on UE detection.
2238 */
2239 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2240 adapter->hw_error = true;
434b3648 2241 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2242 "Error detected in the card\n");
2243 }
2244
2245 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2246 dev_err(&adapter->pdev->dev,
2247 "ERR: sliport status 0x%x\n", sliport_status);
2248 dev_err(&adapter->pdev->dev,
2249 "ERR: sliport error1 0x%x\n", sliport_err1);
2250 dev_err(&adapter->pdev->dev,
2251 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2252 }
2253
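 /* Each set bit left in ue_lo/ue_hi after masking identifies a
 * failed block; name it via the UE status descriptor tables. */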
e1cfb67a 2254 if (ue_lo) {
2255 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2256 if (ue_lo & 1)
7c185276
AK
2257 dev_err(&adapter->pdev->dev,
2258 "UE: %s bit set\n", ue_status_low_desc[i]);
2259 }
2260 }
f67ef7ba 2261
e1cfb67a
PR
2262 if (ue_hi) {
2263 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2264 if (ue_hi & 1)
7c185276
AK
2265 dev_err(&adapter->pdev->dev,
2266 "UE: %s bit set\n", ue_status_hi_desc[i]);
2267 }
2268 }
2269
2270}
2271
8d56ff11
SP
2272static void be_msix_disable(struct be_adapter *adapter)
2273{
ac6a0c4a 2274 if (msix_enabled(adapter)) {
8d56ff11 2275 pci_disable_msix(adapter->pdev);
ac6a0c4a 2276 adapter->num_msix_vec = 0;
3abcdeda
SP
2277 }
2278}
2279
10ef9ab4
SP
2280static uint be_num_rss_want(struct be_adapter *adapter)
2281{
30e80b55 2282 u32 num = 0;
abb93951 2283
10ef9ab4 2284 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2285 (lancer_chip(adapter) ||
2286 (!sriov_want(adapter) && be_physfn(adapter)))) {
2287 num = adapter->max_rss_queues;
30e80b55
YM
2288 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2289 }
2290 return num;
10ef9ab4
SP
2291}
2292
6b7c5b94
SP
2293static void be_msix_enable(struct be_adapter *adapter)
2294{
10ef9ab4 2295#define BE_MIN_MSIX_VECTORS 1
045508a8 2296 int i, status, num_vec, num_roce_vec = 0;
d379142b 2297 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2298
10ef9ab4 2299 /* If RSS queues are not used, need a vec for default RX Q */
2300 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2301 if (be_roce_supported(adapter)) {
2302 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2303 (num_online_cpus() + 1));
2304 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2305 num_vec += num_roce_vec;
2306 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2307 }
10ef9ab4 2308 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2309
ac6a0c4a 2310 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2311 adapter->msix_entries[i].entry = i;
2312
ac6a0c4a 2313 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
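 /* With this legacy API a positive return value is the number of
 * vectors the platform could provide, so the request is retried
 * below with that smaller count. */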
3abcdeda 2314 if (status == 0) {
2315 goto done;
2316 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2317 num_vec = status;
3abcdeda 2318 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2319 num_vec) == 0)
3abcdeda 2320 goto done;
3abcdeda 2321 }
d379142b
SP
2322
2323 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2324 return;
2325done:
045508a8
PP
2326 if (be_roce_supported(adapter)) {
2327 if (num_vec > num_roce_vec) {
2328 adapter->num_msix_vec = num_vec - num_roce_vec;
2329 adapter->num_msix_roce_vec =
2330 num_vec - adapter->num_msix_vec;
2331 } else {
2332 adapter->num_msix_vec = num_vec;
2333 adapter->num_msix_roce_vec = 0;
2334 }
2335 } else
2336 adapter->num_msix_vec = num_vec;
d379142b 2337 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2338 return;
6b7c5b94
SP
2339}
2340
fe6d2a38 2341static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2342 struct be_eq_obj *eqo)
b628bde2 2343{
10ef9ab4 2344 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2345}
6b7c5b94 2346
b628bde2
SP
2347static int be_msix_register(struct be_adapter *adapter)
2348{
10ef9ab4
SP
2349 struct net_device *netdev = adapter->netdev;
2350 struct be_eq_obj *eqo;
2351 int status, i, vec;
6b7c5b94 2352
10ef9ab4
SP
2353 for_all_evt_queues(adapter, eqo, i) {
2354 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2355 vec = be_msix_vec_get(adapter, eqo);
2356 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2357 if (status)
2358 goto err_msix;
2359 }
b628bde2 2360
6b7c5b94 2361 return 0;
3abcdeda 2362err_msix:
10ef9ab4
SP
2363 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2364 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2365 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2366 status);
ac6a0c4a 2367 be_msix_disable(adapter);
6b7c5b94
SP
2368 return status;
2369}
2370
2371static int be_irq_register(struct be_adapter *adapter)
2372{
2373 struct net_device *netdev = adapter->netdev;
2374 int status;
2375
ac6a0c4a 2376 if (msix_enabled(adapter)) {
6b7c5b94
SP
2377 status = be_msix_register(adapter);
2378 if (status == 0)
2379 goto done;
ba343c77
SB
2380 /* INTx is not supported for VF */
2381 if (!be_physfn(adapter))
2382 return status;
6b7c5b94
SP
2383 }
2384
e49cc34f 2385 /* INTx: only the first EQ is used */
6b7c5b94
SP
2386 netdev->irq = adapter->pdev->irq;
2387 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2388 &adapter->eq_obj[0]);
6b7c5b94
SP
2389 if (status) {
2390 dev_err(&adapter->pdev->dev,
2391 "INTx request IRQ failed - err %d\n", status);
2392 return status;
2393 }
2394done:
2395 adapter->isr_registered = true;
2396 return 0;
2397}
2398
2399static void be_irq_unregister(struct be_adapter *adapter)
2400{
2401 struct net_device *netdev = adapter->netdev;
10ef9ab4 2402 struct be_eq_obj *eqo;
3abcdeda 2403 int i;
6b7c5b94
SP
2404
2405 if (!adapter->isr_registered)
2406 return;
2407
2408 /* INTx */
ac6a0c4a 2409 if (!msix_enabled(adapter)) {
e49cc34f 2410 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2411 goto done;
2412 }
2413
2414 /* MSIx */
10ef9ab4
SP
2415 for_all_evt_queues(adapter, eqo, i)
2416 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2417
6b7c5b94
SP
2418done:
2419 adapter->isr_registered = false;
6b7c5b94
SP
2420}
2421
10ef9ab4 2422static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2423{
2424 struct be_queue_info *q;
2425 struct be_rx_obj *rxo;
2426 int i;
2427
2428 for_all_rx_queues(adapter, rxo, i) {
2429 q = &rxo->q;
2430 if (q->created) {
2431 be_cmd_rxq_destroy(adapter, q);
2432 /* After the rxq is invalidated, wait for a grace time
2433 * of 1ms for all dma to end and the flush compl to
2434 * arrive
2435 */
2436 mdelay(1);
10ef9ab4 2437 be_rx_cq_clean(rxo);
482c9e79 2438 }
10ef9ab4 2439 be_queue_free(adapter, q);
482c9e79
SP
2440 }
2441}
2442
889cd4b2
SP
2443static int be_close(struct net_device *netdev)
2444{
2445 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2446 struct be_eq_obj *eqo;
2447 int i;
889cd4b2 2448
045508a8
PP
2449 be_roce_dev_close(adapter);
2450
a323d9bf 2451 for_all_evt_queues(adapter, eqo, i)
10ef9ab4 2452 napi_disable(&eqo->napi);
a323d9bf
SP
2453
2454 be_async_mcc_disable(adapter);
2455
2456 /* Wait for all pending tx completions to arrive so that
2457 * all tx skbs are freed.
2458 */
2459 be_tx_compl_clean(adapter);
2460
2461 be_rx_qs_destroy(adapter);
2462
2463 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2464 if (msix_enabled(adapter))
2465 synchronize_irq(be_msix_vec_get(adapter, eqo));
2466 else
2467 synchronize_irq(netdev->irq);
2468 be_eq_clean(eqo);
63fcb27f
PR
2469 }
2470
889cd4b2
SP
2471 be_irq_unregister(adapter);
2472
482c9e79
SP
2473 return 0;
2474}
2475
10ef9ab4 2476static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2477{
2478 struct be_rx_obj *rxo;
e9008ee9
PR
2479 int rc, i, j;
2480 u8 rsstable[128];
482c9e79
SP
2481
2482 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2483 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2484 sizeof(struct be_eth_rx_d));
2485 if (rc)
2486 return rc;
2487 }
2488
2489 /* The FW would like the default RXQ to be created first */
2490 rxo = default_rxo(adapter);
2491 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2492 adapter->if_handle, false, &rxo->rss_id);
2493 if (rc)
2494 return rc;
2495
2496 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2497 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2498 rx_frag_size, adapter->if_handle,
2499 true, &rxo->rss_id);
482c9e79
SP
2500 if (rc)
2501 return rc;
2502 }
2503
2504 if (be_multi_rxq(adapter)) {
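 /* Stripe the RSS ring ids round-robin across the 128-entry
 * RSS indirection table pushed to the FW below. */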
e9008ee9 2505 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2506 for_all_rss_queues(adapter, rxo, i) {
2507 if ((j + i) >= 128)
2508 break;
2509 rsstable[j + i] = rxo->rss_id;
2510 }
2511 }
2512 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2513 if (rc)
2514 return rc;
2515 }
2516
2517 /* First time posting */
10ef9ab4 2518 for_all_rx_queues(adapter, rxo, i)
482c9e79 2519 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2520 return 0;
2521}
2522
6b7c5b94
SP
2523static int be_open(struct net_device *netdev)
2524{
2525 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2526 struct be_eq_obj *eqo;
3abcdeda 2527 struct be_rx_obj *rxo;
10ef9ab4 2528 struct be_tx_obj *txo;
b236916a 2529 u8 link_status;
3abcdeda 2530 int status, i;
5fb379ee 2531
10ef9ab4 2532 status = be_rx_qs_create(adapter);
482c9e79
SP
2533 if (status)
2534 goto err;
2535
5fb379ee
SP
2536 be_irq_register(adapter);
2537
10ef9ab4 2538 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2539 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2540
10ef9ab4
SP
2541 for_all_tx_queues(adapter, txo, i)
2542 be_cq_notify(adapter, txo->cq.id, true, 0);
2543
7a1e9b20
SP
2544 be_async_mcc_enable(adapter);
2545
10ef9ab4
SP
2546 for_all_evt_queues(adapter, eqo, i) {
2547 napi_enable(&eqo->napi);
2548 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2549 }
2550
323ff71e 2551 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2552 if (!status)
2553 be_link_status_update(adapter, link_status);
2554
045508a8 2555 be_roce_dev_open(adapter);
889cd4b2
SP
2556 return 0;
2557err:
2558 be_close(adapter->netdev);
2559 return -EIO;
5fb379ee
SP
2560}
2561
71d8d1b5
AK
2562static int be_setup_wol(struct be_adapter *adapter, bool enable)
2563{
2564 struct be_dma_mem cmd;
2565 int status = 0;
2566 u8 mac[ETH_ALEN];
2567
2568 memset(mac, 0, ETH_ALEN);
2569
2570 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2571 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2572 GFP_KERNEL);
71d8d1b5
AK
2573 if (cmd.va == NULL)
2574 return -1;
2575 memset(cmd.va, 0, cmd.size);
2576
2577 if (enable) {
2578 status = pci_write_config_dword(adapter->pdev,
2579 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2580 if (status) {
2581 dev_err(&adapter->pdev->dev,
2381a55c 2582 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2583 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2584 cmd.dma);
71d8d1b5
AK
2585 return status;
2586 }
2587 status = be_cmd_enable_magic_wol(adapter,
2588 adapter->netdev->dev_addr, &cmd);
2589 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2590 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2591 } else {
2592 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2593 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2594 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2595 }
2596
2b7bcebf 2597 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2598 return status;
2599}
2600
6d87f5c3
AK
2601/*
2602 * Generate a seed MAC address from the PF MAC Address using jhash.
2603 * MAC addresses for VFs are assigned incrementally starting from the seed.
2604 * These addresses are programmed in the ASIC by the PF and the VF driver
2605 * queries for the MAC address during its probe.
2606 */
4c876616 2607static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2608{
f9449ab7 2609 u32 vf;
3abcdeda 2610 int status = 0;
6d87f5c3 2611 u8 mac[ETH_ALEN];
11ac75ed 2612 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2613
2614 be_vf_eth_addr_generate(adapter, mac);
2615
11ac75ed 2616 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2617 if (lancer_chip(adapter)) {
2618 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2619 } else {
2620 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2621 vf_cfg->if_handle,
2622 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2623 }
2624
6d87f5c3
AK
2625 if (status)
2626 dev_err(&adapter->pdev->dev,
590c391d 2627 "MAC address assignment failed for VF %d\n", vf);
6d87f5c3 2628 else
11ac75ed 2629 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2630
2631 mac[5] += 1;
2632 }
2633 return status;
2634}
2635
4c876616
SP
2636static int be_vfs_mac_query(struct be_adapter *adapter)
2637{
2638 int status, vf;
2639 u8 mac[ETH_ALEN];
2640 struct be_vf_cfg *vf_cfg;
2641 bool active;
2642
2643 for_all_vfs(adapter, vf_cfg, vf) {
2644 be_cmd_get_mac_from_list(adapter, mac, &active,
2645 &vf_cfg->pmac_id, 0);
2646
2647 status = be_cmd_mac_addr_query(adapter, mac, false,
2648 vf_cfg->if_handle, 0);
2649 if (status)
2650 return status;
2651 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2652 }
2653 return 0;
2654}
2655
f9449ab7 2656static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2657{
11ac75ed 2658 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2659 u32 vf;
2660
39f1d94d 2661 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2662 dev_warn(&adapter->pdev->dev,
2663 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2664 goto done;
2665 }
2666
11ac75ed 2667 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2668 if (lancer_chip(adapter))
2669 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2670 else
11ac75ed
SP
2671 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2672 vf_cfg->pmac_id, vf + 1);
f9449ab7 2673
11ac75ed
SP
2674 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2675 }
39f1d94d
SP
2676 pci_disable_sriov(adapter->pdev);
2677done:
2678 kfree(adapter->vf_cfg);
2679 adapter->num_vfs = 0;
6d87f5c3
AK
2680}
2681
a54769f5
SP
2682static int be_clear(struct be_adapter *adapter)
2683{
fbc13f01
AK
2684 int i = 1;
2685
191eb756
SP
2686 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2687 cancel_delayed_work_sync(&adapter->work);
2688 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2689 }
2690
11ac75ed 2691 if (sriov_enabled(adapter))
f9449ab7
SP
2692 be_vf_clear(adapter);
2693
fbc13f01
AK
2694 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2695 be_cmd_pmac_del(adapter, adapter->if_handle,
2696 adapter->pmac_id[i], 0);
2697
f9449ab7 2698 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2699
2700 be_mcc_queues_destroy(adapter);
10ef9ab4 2701 be_rx_cqs_destroy(adapter);
a54769f5 2702 be_tx_queues_destroy(adapter);
10ef9ab4 2703 be_evt_queues_destroy(adapter);
a54769f5 2704
abb93951
PR
2705 kfree(adapter->pmac_id);
2706 adapter->pmac_id = NULL;
2707
10ef9ab4 2708 be_msix_disable(adapter);
a54769f5
SP
2709 return 0;
2710}
2711
4c876616 2712static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2713{
4c876616
SP
2714 struct be_vf_cfg *vf_cfg;
2715 u32 cap_flags, en_flags, vf;
abb93951
PR
2716 int status;
2717
4c876616
SP
2718 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2719 BE_IF_FLAGS_MULTICAST;
abb93951 2720
4c876616
SP
2721 for_all_vfs(adapter, vf_cfg, vf) {
2722 if (!BE3_chip(adapter))
2723 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2724
2725 /* If a FW profile exists, then cap_flags are updated */
2726 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2727 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2728 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2729 &vf_cfg->if_handle, vf + 1);
2730 if (status)
2731 goto err;
2732 }
2733err:
2734 return status;
abb93951
PR
2735}
2736
39f1d94d 2737static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2738{
11ac75ed 2739 struct be_vf_cfg *vf_cfg;
30128031
SP
2740 int vf;
2741
39f1d94d
SP
2742 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2743 GFP_KERNEL);
2744 if (!adapter->vf_cfg)
2745 return -ENOMEM;
2746
11ac75ed
SP
2747 for_all_vfs(adapter, vf_cfg, vf) {
2748 vf_cfg->if_handle = -1;
2749 vf_cfg->pmac_id = -1;
30128031 2750 }
39f1d94d 2751 return 0;
30128031
SP
2752}
2753
f9449ab7
SP
2754static int be_vf_setup(struct be_adapter *adapter)
2755{
11ac75ed 2756 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2757 u16 def_vlan, lnk_speed;
4c876616
SP
2758 int status, old_vfs, vf;
2759 struct device *dev = &adapter->pdev->dev;
39f1d94d 2760
4c876616
SP
2761 old_vfs = be_find_vfs(adapter, ENABLED);
2762 if (old_vfs) {
2763 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2764 if (old_vfs != num_vfs)
2765 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2766 adapter->num_vfs = old_vfs;
39f1d94d 2767 } else {
4c876616
SP
2768 if (num_vfs > adapter->dev_num_vfs)
2769 dev_info(dev, "Device supports %d VFs and not %d\n",
2770 adapter->dev_num_vfs, num_vfs);
2771 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2772
2773 status = pci_enable_sriov(adapter->pdev, num_vfs);
2774 if (status) {
2775 dev_err(dev, "SRIOV enable failed\n");
2776 adapter->num_vfs = 0;
2777 return 0;
2778 }
39f1d94d
SP
2779 }
2780
2781 status = be_vf_setup_init(adapter);
2782 if (status)
2783 goto err;
30128031 2784
4c876616
SP
2785 if (old_vfs) {
2786 for_all_vfs(adapter, vf_cfg, vf) {
2787 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2788 if (status)
2789 goto err;
2790 }
2791 } else {
2792 status = be_vfs_if_create(adapter);
f9449ab7
SP
2793 if (status)
2794 goto err;
f9449ab7
SP
2795 }
2796
4c876616
SP
2797 if (old_vfs) {
2798 status = be_vfs_mac_query(adapter);
2799 if (status)
2800 goto err;
2801 } else {
39f1d94d
SP
2802 status = be_vf_eth_addr_config(adapter);
2803 if (status)
2804 goto err;
2805 }
f9449ab7 2806
11ac75ed 2807 for_all_vfs(adapter, vf_cfg, vf) {
4c876616 2808 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
2809 * Allow the full available bandwidth.
2810 */
2811 if (BE3_chip(adapter) && !old_vfs)
2812 be_cmd_set_qos(adapter, 1000, vf+1);
2813
2814 status = be_cmd_link_status_query(adapter, &lnk_speed,
2815 NULL, vf + 1);
2816 if (!status)
2817 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2818
2819 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2820 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2821 if (status)
2822 goto err;
2823 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2824
2825 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7
SP
2826 }
2827 return 0;
2828err:
4c876616
SP
2829 dev_err(dev, "VF setup failed\n");
2830 be_vf_clear(adapter);
f9449ab7
SP
2831 return status;
2832}
2833
30128031
SP
2834static void be_setup_init(struct be_adapter *adapter)
2835{
2836 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2837 adapter->phy.link_speed = -1;
30128031
SP
2838 adapter->if_handle = -1;
2839 adapter->be3_native = false;
2840 adapter->promiscuous = false;
f25b119c
PR
2841 if (be_physfn(adapter))
2842 adapter->cmd_privileges = MAX_PRIVILEGES;
2843 else
2844 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2845}
2846
1578e777
PR
2847static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2848 bool *active_mac, u32 *pmac_id)
590c391d 2849{
1578e777 2850 int status = 0;
e5e1ee89 2851
1578e777
PR
2852 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2853 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2854 if (!lancer_chip(adapter) && !be_physfn(adapter))
2855 *active_mac = true;
2856 else
2857 *active_mac = false;
e5e1ee89 2858
1578e777
PR
2859 return status;
2860 }
e5e1ee89 2861
1578e777
PR
2862 if (lancer_chip(adapter)) {
2863 status = be_cmd_get_mac_from_list(adapter, mac,
2864 active_mac, pmac_id, 0);
2865 if (*active_mac) {
5ee4979b
SP
2866 status = be_cmd_mac_addr_query(adapter, mac, false,
2867 if_handle, *pmac_id);
1578e777
PR
2868 }
2869 } else if (be_physfn(adapter)) {
2870 /* For BE3, for PF get permanent MAC */
5ee4979b 2871 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2872 *active_mac = false;
e5e1ee89 2873 } else {
1578e777 2874 /* For BE3, for VF get soft MAC assigned by PF */
5ee4979b 2875 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2876 if_handle, 0);
2877 *active_mac = true;
e5e1ee89 2878 }
590c391d
PR
2879 return status;
2880}
2881
abb93951
PR
2882static void be_get_resources(struct be_adapter *adapter)
2883{
4c876616
SP
2884 u16 dev_num_vfs;
2885 int pos, status;
abb93951
PR
2886 bool profile_present = false;
2887
4c876616 2888 if (!BEx_chip(adapter)) {
abb93951 2889 status = be_cmd_get_func_config(adapter);
abb93951
PR
2890 if (!status)
2891 profile_present = true;
2892 }
2893
2894 if (profile_present) {
2895 /* Sanity fixes for Lancer */
2896 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2897 BE_UC_PMAC_COUNT);
2898 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2899 BE_NUM_VLANS_SUPPORTED);
2900 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2901 BE_MAX_MC);
2902 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2903 MAX_TX_QS);
2904 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2905 BE3_MAX_RSS_QS);
2906 adapter->max_event_queues = min_t(u16,
2907 adapter->max_event_queues,
2908 BE3_MAX_RSS_QS);
2909
2910 if (adapter->max_rss_queues &&
2911 adapter->max_rss_queues == adapter->max_rx_queues)
2912 adapter->max_rss_queues -= 1;
2913
2914 if (adapter->max_event_queues < adapter->max_rss_queues)
2915 adapter->max_rss_queues = adapter->max_event_queues;
2916
2917 } else {
2918 if (be_physfn(adapter))
2919 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2920 else
2921 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2922
2923 if (adapter->function_mode & FLEX10_MODE)
2924 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2925 else
2926 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2927
2928 adapter->max_mcast_mac = BE_MAX_MC;
2929 adapter->max_tx_queues = MAX_TX_QS;
2930 adapter->max_rss_queues = (adapter->be3_native) ?
2931 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2932 adapter->max_event_queues = BE3_MAX_RSS_QS;
2933
2934 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2935 BE_IF_FLAGS_BROADCAST |
2936 BE_IF_FLAGS_MULTICAST |
2937 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2938 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2939 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2940 BE_IF_FLAGS_PROMISCUOUS;
2941
2942 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2943 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2944 }
4c876616
SP
2945
2946 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2947 if (pos) {
2948 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2949 &dev_num_vfs);
2950 if (BE3_chip(adapter))
2951 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2952 adapter->dev_num_vfs = dev_num_vfs;
2953 }
abb93951
PR
2954}
2955
39f1d94d
SP
2956/* Routine to query per function resource limits */
2957static int be_get_config(struct be_adapter *adapter)
2958{
4c876616 2959 int status;
39f1d94d 2960
abb93951
PR
2961 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2962 &adapter->function_mode,
2963 &adapter->function_caps);
2964 if (status)
2965 goto err;
2966
2967 be_get_resources(adapter);
2968
2969 /* primary mac needs 1 pmac entry */
2970 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2971 sizeof(u32), GFP_KERNEL);
2972 if (!adapter->pmac_id) {
2973 status = -ENOMEM;
2974 goto err;
2975 }
2976
abb93951
PR
2977err:
2978 return status;
39f1d94d
SP
2979}
2980
5fb379ee
SP
2981static int be_setup(struct be_adapter *adapter)
2982{
39f1d94d 2983 struct device *dev = &adapter->pdev->dev;
abb93951 2984 u32 en_flags;
a54769f5 2985 u32 tx_fc, rx_fc;
10ef9ab4 2986 int status;
ba343c77 2987 u8 mac[ETH_ALEN];
1578e777 2988 bool active_mac;
ba343c77 2989
30128031 2990 be_setup_init(adapter);
6b7c5b94 2991
abb93951
PR
2992 if (!lancer_chip(adapter))
2993 be_cmd_req_native_mode(adapter);
39f1d94d 2994
abb93951
PR
2995 status = be_get_config(adapter);
2996 if (status)
2997 goto err;
73d540f2 2998
10ef9ab4
SP
2999 be_msix_enable(adapter);
3000
3001 status = be_evt_queues_create(adapter);
3002 if (status)
a54769f5 3003 goto err;
6b7c5b94 3004
10ef9ab4
SP
3005 status = be_tx_cqs_create(adapter);
3006 if (status)
3007 goto err;
3008
3009 status = be_rx_cqs_create(adapter);
3010 if (status)
a54769f5 3011 goto err;
6b7c5b94 3012
f9449ab7 3013 status = be_mcc_queues_create(adapter);
10ef9ab4 3014 if (status)
a54769f5 3015 goto err;
6b7c5b94 3016
f25b119c 3017 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3018 /* In UMC mode the FW does not return the right privileges.
3019 * Override with the correct privilege, equivalent to the PF's.
3020 */
3021 if (be_is_mc(adapter))
3022 adapter->cmd_privileges = MAX_PRIVILEGES;
3023
f9449ab7
SP
3024 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3025 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3026
abb93951 3027 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3028 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3029
abb93951 3030 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3031
abb93951 3032 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3033 &adapter->if_handle, 0);
5fb379ee 3034 if (status != 0)
a54769f5 3035 goto err;
6b7c5b94 3036
1578e777
PR
3037 memset(mac, 0, ETH_ALEN);
3038 active_mac = false;
3039 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3040 &active_mac, &adapter->pmac_id[0]);
3041 if (status != 0)
3042 goto err;
3043
3044 if (!active_mac) {
3045 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3046 &adapter->pmac_id[0], 0);
3047 if (status != 0)
3048 goto err;
3049 }
3050
3051 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3052 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3053 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3054 }
0dffc83e 3055
10ef9ab4
SP
3056 status = be_tx_qs_create(adapter);
3057 if (status)
3058 goto err;
3059
04b71175 3060 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3061
1d1e9a46 3062 if (adapter->vlans_added)
10329df8 3063 be_vid_config(adapter);
7ab8b0b4 3064
a54769f5 3065 be_set_rx_mode(adapter->netdev);
5fb379ee 3066
ddc3f5cb 3067 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3068
ddc3f5cb
AK
3069 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3070 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3071 adapter->rx_fc);
2dc1deb6 3072
39f1d94d
SP
3073 if (be_physfn(adapter) && num_vfs) {
3074 if (adapter->dev_num_vfs)
3075 be_vf_setup(adapter);
3076 else
3077 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3078 }
3079
f25b119c
PR
3080 status = be_cmd_get_phy_info(adapter);
3081 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3082 adapter->phy.fc_autoneg = 1;
3083
191eb756
SP
3084 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3085 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3086 return 0;
a54769f5
SP
3087err:
3088 be_clear(adapter);
3089 return status;
3090}
6b7c5b94 3091
66268739
IV
3092#ifdef CONFIG_NET_POLL_CONTROLLER
3093static void be_netpoll(struct net_device *netdev)
3094{
3095 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3096 struct be_eq_obj *eqo;
66268739
IV
3097 int i;
3098
e49cc34f
SP
3099 for_all_evt_queues(adapter, eqo, i) {
3100 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3101 napi_schedule(&eqo->napi);
3102 }
10ef9ab4
SP
3103
3104 return;
66268739
IV
3105}
3106#endif
3107
84517482 3108#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3109char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3110
fa9a6fed 3111static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3112 const u8 *p, u32 img_start, int image_size,
3113 int hdr_size)
fa9a6fed
SB
3114{
3115 u32 crc_offset;
3116 u8 flashed_crc[4];
3117 int status;
3f0d4560 3118
3119 crc_offset = hdr_size + img_start + image_size - 4;
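 /* The last 4 bytes of the image region hold its CRC; fetch the
 * CRC of the image already in flash and compare below. */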
3120
fa9a6fed 3121 p += crc_offset;
3f0d4560
AK
3122
3123 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3124 (image_size - 4));
fa9a6fed
SB
3125 if (status) {
3126 dev_err(&adapter->pdev->dev,
3127 "could not get crc from flash, not flashing redboot\n");
3128 return false;
3129 }
3130
3131 /* update redboot only if crc does not match */
3132 if (!memcmp(flashed_crc, p, 4))
3133 return false;
3134 else
3135 return true;
fa9a6fed
SB
3136}
3137
306f1348
SP
3138static bool phy_flashing_required(struct be_adapter *adapter)
3139{
42f11cf2
AK
3140 return (adapter->phy.phy_type == TN_8022 &&
3141 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3142}
3143
c165541e
PR
3144static bool is_comp_in_ufi(struct be_adapter *adapter,
3145 struct flash_section_info *fsec, int type)
3146{
3147 int i = 0, img_type = 0;
3148 struct flash_section_info_g2 *fsec_g2 = NULL;
3149
ca34fe38 3150 if (BE2_chip(adapter))
c165541e
PR
3151 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3152
3153 for (i = 0; i < MAX_FLASH_COMP; i++) {
3154 if (fsec_g2)
3155 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3156 else
3157 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3158
3159 if (img_type == type)
3160 return true;
3161 }
3162 return false;
3163
3164}
3165
3166struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3167 int header_size,
3168 const struct firmware *fw)
3169{
3170 struct flash_section_info *fsec = NULL;
3171 const u8 *p = fw->data;
3172
3173 p += header_size;
3174 while (p < (fw->data + fw->size)) {
3175 fsec = (struct flash_section_info *)p;
3176 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3177 return fsec;
3178 p += 32;
3179 }
3180 return NULL;
3181}
3182
773a2d7c
PR
3183static int be_flash(struct be_adapter *adapter, const u8 *img,
3184 struct be_dma_mem *flash_cmd, int optype, int img_size)
3185{
3186 u32 total_bytes = 0, flash_op, num_bytes = 0;
3187 int status = 0;
3188 struct be_cmd_write_flashrom *req = flash_cmd->va;
3189
3190 total_bytes = img_size;
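 /* The image is written in 32KB chunks: intermediate chunks use a
 * *_SAVE op that buffers data in the adapter, and the final chunk
 * issues the actual *_FLASH op that commits the whole image. */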
3191 while (total_bytes) {
3192 num_bytes = min_t(u32, 32*1024, total_bytes);
3193
3194 total_bytes -= num_bytes;
3195
3196 if (!total_bytes) {
3197 if (optype == OPTYPE_PHY_FW)
3198 flash_op = FLASHROM_OPER_PHY_FLASH;
3199 else
3200 flash_op = FLASHROM_OPER_FLASH;
3201 } else {
3202 if (optype == OPTYPE_PHY_FW)
3203 flash_op = FLASHROM_OPER_PHY_SAVE;
3204 else
3205 flash_op = FLASHROM_OPER_SAVE;
3206 }
3207
be716446 3208 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3209 img += num_bytes;
3210 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3211 flash_op, num_bytes);
3212 if (status) {
3213 if (status == ILLEGAL_IOCTL_REQ &&
3214 optype == OPTYPE_PHY_FW)
3215 break;
3216 dev_err(&adapter->pdev->dev,
3217 "cmd to write to flash rom failed.\n");
3218 return status;
3219 }
3220 }
3221 return 0;
3222}
3223
ca34fe38
SP
3224/* For BE2 and BE3 */
3225static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3226 const struct firmware *fw,
3227 struct be_dma_mem *flash_cmd,
3228 int num_of_images)
3f0d4560 3229
84517482 3230{
3f0d4560 3231 int status = 0, i, filehdr_size = 0;
c165541e 3232 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3233 const u8 *p = fw->data;
215faf9c 3234 const struct flash_comp *pflashcomp;
773a2d7c 3235 int num_comp, redboot;
c165541e
PR
3236 struct flash_section_info *fsec = NULL;
3237
3238 struct flash_comp gen3_flash_types[] = {
3239 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3240 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3241 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3242 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3243 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3244 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3245 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3246 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3247 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3248 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3249 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3250 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3251 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3252 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3253 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3254 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3255 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3256 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3257 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3258 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3259 };
c165541e
PR
3260
3261 struct flash_comp gen2_flash_types[] = {
3262 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3263 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3264 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3265 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3266 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3267 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3268 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3269 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3270 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3271 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3272 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3273 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3274 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3275 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3276 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3277 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3278 };
3279
ca34fe38 3280 if (BE3_chip(adapter)) {
3f0d4560
AK
3281 pflashcomp = gen3_flash_types;
3282 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3283 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3284 } else {
3285 pflashcomp = gen2_flash_types;
3286 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3287 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3288 }
ca34fe38 3289
c165541e
PR
3290 /* Get flash section info*/
3291 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3292 if (!fsec) {
3293 dev_err(&adapter->pdev->dev,
3294 "Invalid Cookie. UFI corrupted ?\n");
3295 return -1;
3296 }
9fe96934 3297 for (i = 0; i < num_comp; i++) {
c165541e 3298 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3299 continue;
c165541e
PR
3300
3301 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3302 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3303 continue;
3304
773a2d7c
PR
3305 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3306 !phy_flashing_required(adapter))
306f1348 3307 continue;
c165541e 3308
773a2d7c
PR
3309 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3310 redboot = be_flash_redboot(adapter, fw->data,
3311 pflashcomp[i].offset, pflashcomp[i].size,
3312 filehdr_size + img_hdrs_size);
3313 if (!redboot)
3314 continue;
3315 }
c165541e 3316
3f0d4560 3317 p = fw->data;
c165541e 3318 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3319 if (p + pflashcomp[i].size > fw->data + fw->size)
3320 return -1;
773a2d7c
PR
3321
3322 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3323 pflashcomp[i].size);
3324 if (status) {
3325 dev_err(&adapter->pdev->dev,
3326 "Flashing section type %d failed.\n",
3327 pflashcomp[i].img_type);
3328 return status;
84517482 3329 }
84517482 3330 }
84517482
AK
3331 return 0;
3332}
3333
773a2d7c
PR
3334static int be_flash_skyhawk(struct be_adapter *adapter,
3335 const struct firmware *fw,
3336 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3337{
773a2d7c
PR
3338 int status = 0, i, filehdr_size = 0;
3339 int img_offset, img_size, img_optype, redboot;
3340 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3341 const u8 *p = fw->data;
3342 struct flash_section_info *fsec = NULL;
3343
3344 filehdr_size = sizeof(struct flash_file_hdr_g3);
3345 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3346 if (!fsec) {
3347 dev_err(&adapter->pdev->dev,
3348 "Invalid Cookie. UFI corrupted ?\n");
3349 return -1;
3350 }
3351
3352 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3353 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3354 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3355
3356 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3357 case IMAGE_FIRMWARE_iSCSI:
3358 img_optype = OPTYPE_ISCSI_ACTIVE;
3359 break;
3360 case IMAGE_BOOT_CODE:
3361 img_optype = OPTYPE_REDBOOT;
3362 break;
3363 case IMAGE_OPTION_ROM_ISCSI:
3364 img_optype = OPTYPE_BIOS;
3365 break;
3366 case IMAGE_OPTION_ROM_PXE:
3367 img_optype = OPTYPE_PXE_BIOS;
3368 break;
3369 case IMAGE_OPTION_ROM_FCoE:
3370 img_optype = OPTYPE_FCOE_BIOS;
3371 break;
3372 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3373 img_optype = OPTYPE_ISCSI_BACKUP;
3374 break;
3375 case IMAGE_NCSI:
3376 img_optype = OPTYPE_NCSI_FW;
3377 break;
3378 default:
3379 continue;
3380 }
3381
3382 if (img_optype == OPTYPE_REDBOOT) {
3383 redboot = be_flash_redboot(adapter, fw->data,
3384 img_offset, img_size,
3385 filehdr_size + img_hdrs_size);
3386 if (!redboot)
3387 continue;
3388 }
3389
3390 p = fw->data;
3391 p += filehdr_size + img_offset + img_hdrs_size;
3392 if (p + img_size > fw->data + fw->size)
3393 return -1;
3394
3395 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3396 if (status) {
3397 dev_err(&adapter->pdev->dev,
3398 "Flashing section type %d failed.\n",
3399 le32_to_cpu(fsec->fsec_entry[i].type));
3400 return status;
3401 }
3402 }
3403 return 0;
3f0d4560
AK
3404}
3405
f67ef7ba
PR
3406static int lancer_wait_idle(struct be_adapter *adapter)
3407{
3408#define SLIPORT_IDLE_TIMEOUT 30
3409 u32 reg_val;
3410 int status = 0, i;
3411
3412 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3413 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3414 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3415 break;
3416
3417 ssleep(1);
3418 }
3419
3420 if (i == SLIPORT_IDLE_TIMEOUT)
3421 status = -1;
3422
3423 return status;
3424}
3425
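/* lancer_fw_reset() below is only issued once the SLIPORT reports idle;
 * lancer_wait_idle() above polls the in-progress bit of PHYSDEV_CONTROL
 * once a second for up to SLIPORT_IDLE_TIMEOUT seconds before giving up.
 */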
3426static int lancer_fw_reset(struct be_adapter *adapter)
3427{
3428 int status = 0;
3429
3430 status = lancer_wait_idle(adapter);
3431 if (status)
3432 return status;
3433
3434 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3435 PHYSDEV_CONTROL_OFFSET);
3436
3437 return status;
3438}
3439
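/* Lancer images are streamed to the "/prg" object in 32KB chunks via
 * lancer_cmd_write_object(); a final zero-length write at the end offset
 * commits the image, and change_status then tells us whether a FW reset
 * or a full reboot is needed for the new image to take effect.
 */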
485bf569
SN
3440static int lancer_fw_download(struct be_adapter *adapter,
3441 const struct firmware *fw)
84517482 3442{
485bf569
SN
3443#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3444#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3445 struct be_dma_mem flash_cmd;
485bf569
SN
3446 const u8 *data_ptr = NULL;
3447 u8 *dest_image_ptr = NULL;
3448 size_t image_size = 0;
3449 u32 chunk_size = 0;
3450 u32 data_written = 0;
3451 u32 offset = 0;
3452 int status = 0;
3453 u8 add_status = 0;
f67ef7ba 3454 u8 change_status;
84517482 3455
485bf569 3456 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3457 dev_err(&adapter->pdev->dev,
485bf569
SN
3458 "FW Image not properly aligned. "
3459 "Length must be 4 byte aligned.\n");
3460 status = -EINVAL;
3461 goto lancer_fw_exit;
d9efd2af
SB
3462 }
3463
485bf569
SN
3464 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3465 + LANCER_FW_DOWNLOAD_CHUNK;
3466 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3467 &flash_cmd.dma, GFP_KERNEL);
3468 if (!flash_cmd.va) {
3469 status = -ENOMEM;
3470 dev_err(&adapter->pdev->dev,
3471 "Memory allocation failure while flashing\n");
3472 goto lancer_fw_exit;
3473 }
84517482 3474
485bf569
SN
3475 dest_image_ptr = flash_cmd.va +
3476 sizeof(struct lancer_cmd_req_write_object);
3477 image_size = fw->size;
3478 data_ptr = fw->data;
3479
3480 while (image_size) {
3481 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3482
3483 /* Copy the image chunk content. */
3484 memcpy(dest_image_ptr, data_ptr, chunk_size);
3485
3486 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3487 chunk_size, offset,
3488 LANCER_FW_DOWNLOAD_LOCATION,
3489 &data_written, &change_status,
3490 &add_status);
485bf569
SN
3491 if (status)
3492 break;
3493
3494 offset += data_written;
3495 data_ptr += data_written;
3496 image_size -= data_written;
3497 }
3498
3499 if (!status) {
3500 /* Commit the FW written */
3501 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3502 0, offset,
3503 LANCER_FW_DOWNLOAD_LOCATION,
3504 &data_written, &change_status,
3505 &add_status);
485bf569
SN
3506 }
3507
3508 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3509 flash_cmd.dma);
3510 if (status) {
3511 dev_err(&adapter->pdev->dev,
3512 "Firmware load error. "
3513 "Status code: 0x%x Additional Status: 0x%x\n",
3514 status, add_status);
3515 goto lancer_fw_exit;
3516 }
3517
f67ef7ba
PR
3518 if (change_status == LANCER_FW_RESET_NEEDED) {
3519 status = lancer_fw_reset(adapter);
3520 if (status) {
3521 dev_err(&adapter->pdev->dev,
3522 "Adapter busy for FW reset.\n"
3523 "New FW will not be active.\n");
3524 goto lancer_fw_exit;
3525 }
3526 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3527 dev_err(&adapter->pdev->dev,
3528 "System reboot required for new FW"
3529 " to be active\n");
3530 }
3531
485bf569
SN
3532 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3533lancer_fw_exit:
3534 return status;
3535}
3536
ca34fe38
SP
3537#define UFI_TYPE2 2
3538#define UFI_TYPE3 3
3539#define UFI_TYPE4 4
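/* UFI compatibility is keyed off the first character of the build string
 * in the file header: '2' for BE2, '3' for BE3 and '4' for Skyhawk.
 * Anything else (or a missing header) is rejected as incompatible.
 */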
3540static int be_get_ufi_type(struct be_adapter *adapter,
3541 struct flash_file_hdr_g2 *fhdr)
773a2d7c
PR
3542{
3543 if (fhdr == NULL)
3544 goto be_get_ufi_exit;
3545
ca34fe38
SP
3546 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3547 return UFI_TYPE4;
3548 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3549 return UFI_TYPE3;
3550 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3551 return UFI_TYPE2;
773a2d7c
PR
3552
3553be_get_ufi_exit:
3554 dev_err(&adapter->pdev->dev,
3555 "UFI and Interface are not compatible for flashing\n");
3556 return -1;
3557}
3558
485bf569
SN
3559static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3560{
3561 struct flash_file_hdr_g2 *fhdr;
3562 struct flash_file_hdr_g3 *fhdr3;
3563 struct image_hdr *img_hdr_ptr = NULL;
3564 struct be_dma_mem flash_cmd;
3565 const u8 *p;
773a2d7c 3566 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3567
be716446 3568 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3569 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3570 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3571 if (!flash_cmd.va) {
3572 status = -ENOMEM;
3573 dev_err(&adapter->pdev->dev,
3574 "Memory allocation failure while flashing\n");
485bf569 3575 goto be_fw_exit;
84517482
AK
3576 }
3577
773a2d7c
PR
3578 p = fw->data;
3579 fhdr = (struct flash_file_hdr_g2 *)p;
3580
ca34fe38 3581 ufi_type = be_get_ufi_type(adapter, fhdr);
773a2d7c
PR
3582
3583 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3584 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3585 for (i = 0; i < num_imgs; i++) {
3586 img_hdr_ptr = (struct image_hdr *)(fw->data +
3587 (sizeof(struct flash_file_hdr_g3) +
3588 i * sizeof(struct image_hdr)));
3589 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
ca34fe38 3590 if (ufi_type == UFI_TYPE4)
773a2d7c
PR
3591 status = be_flash_skyhawk(adapter, fw,
3592 &flash_cmd, num_imgs);
ca34fe38
SP
3593 else if (ufi_type == UFI_TYPE3)
3594 status = be_flash_BEx(adapter, fw, &flash_cmd,
3595 num_imgs);
3f0d4560 3596 }
773a2d7c
PR
3597 }
3598
ca34fe38
SP
3599 if (ufi_type == UFI_TYPE2)
3600 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3601 else if (ufi_type == -1)
3f0d4560 3602 status = -1;
84517482 3603
2b7bcebf
IV
3604 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3605 flash_cmd.dma);
84517482
AK
3606 if (status) {
3607 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3608 goto be_fw_exit;
84517482
AK
3609 }
3610
af901ca1 3611 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3612
485bf569
SN
3613be_fw_exit:
3614 return status;
3615}
3616
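/* be_load_fw() is the common entry point for user-initiated flashing
 * (typically reached via ethtool, e.g. "ethtool -f <iface> <file>.ufi");
 * it refuses to run while the interface is down, fetches the image with
 * request_firmware() and dispatches on the chip family.
 */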
3617int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3618{
3619 const struct firmware *fw;
3620 int status;
3621
3622 if (!netif_running(adapter->netdev)) {
3623 dev_err(&adapter->pdev->dev,
3624 "Firmware load not allowed (interface is down)\n");
3625 return -1;
3626 }
3627
3628 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3629 if (status)
3630 goto fw_exit;
3631
3632 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3633
3634 if (lancer_chip(adapter))
3635 status = lancer_fw_download(adapter, fw);
3636 else
3637 status = be_fw_download(adapter, fw);
3638
84517482
AK
3639fw_exit:
3640 release_firmware(fw);
3641 return status;
3642}
3643
e5686ad8 3644static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3645 .ndo_open = be_open,
3646 .ndo_stop = be_close,
3647 .ndo_start_xmit = be_xmit,
a54769f5 3648 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3649 .ndo_set_mac_address = be_mac_addr_set,
3650 .ndo_change_mtu = be_change_mtu,
ab1594e9 3651 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3652 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3653 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3654 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3655 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3656 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3657 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3658 .ndo_get_vf_config = be_get_vf_config,
3659#ifdef CONFIG_NET_POLL_CONTROLLER
3660 .ndo_poll_controller = be_netpoll,
3661#endif
6b7c5b94
SP
3662};
3663
3664static void be_netdev_init(struct net_device *netdev)
3665{
3666 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3667 struct be_eq_obj *eqo;
3abcdeda 3668 int i;
6b7c5b94 3669
6332c8d3 3670 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3671 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3672 NETIF_F_HW_VLAN_TX;
3673 if (be_multi_rxq(adapter))
3674 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3675
3676 netdev->features |= netdev->hw_features |
8b8ddc68 3677 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3678
eb8a50d9 3679 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3680 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3681
fbc13f01
AK
3682 netdev->priv_flags |= IFF_UNICAST_FLT;
3683
6b7c5b94
SP
3684 netdev->flags |= IFF_MULTICAST;
3685
b7e5887e 3686 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3687
10ef9ab4 3688 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3689
3690 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3691
10ef9ab4
SP
3692 for_all_evt_queues(adapter, eqo, i)
3693 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3694}
3695
3696static void be_unmap_pci_bars(struct be_adapter *adapter)
3697{
c5b3ad4c
SP
3698 if (adapter->csr)
3699 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3700 if (adapter->db)
ce66f781 3701 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3702}
3703
ce66f781
SP
3704static int db_bar(struct be_adapter *adapter)
3705{
3706 if (lancer_chip(adapter) || !be_physfn(adapter))
3707 return 0;
3708 else
3709 return 4;
3710}
3711
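/* On Lancer and on virtual functions the doorbell region lives in BAR 0;
 * BE-x physical functions expose it in BAR 4 and additionally map the
 * CSR space from BAR 2 (see be_map_pci_bars() below).
 */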
3712static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3713{
dbf0f2a7 3714 if (skyhawk_chip(adapter)) {
ce66f781
SP
3715 adapter->roce_db.size = 4096;
3716 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3717 db_bar(adapter));
3718 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3719 db_bar(adapter));
3720 }
045508a8 3721 return 0;
6b7c5b94
SP
3722}
3723
3724static int be_map_pci_bars(struct be_adapter *adapter)
3725{
3726 u8 __iomem *addr;
ce66f781 3727 u32 sli_intf;
6b7c5b94 3728
ce66f781
SP
3729 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3730 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3731 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3732
c5b3ad4c
SP
3733 if (BEx_chip(adapter) && be_physfn(adapter)) {
3734 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3735 if (adapter->csr == NULL)
3736 return -ENOMEM;
3737 }
3738
ce66f781 3739 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3740 if (addr == NULL)
3741 goto pci_map_err;
ba343c77 3742 adapter->db = addr;
ce66f781
SP
3743
3744 be_roce_map_pci_bars(adapter);
6b7c5b94 3745 return 0;
ce66f781 3746
6b7c5b94
SP
3747pci_map_err:
3748 be_unmap_pci_bars(adapter);
3749 return -ENOMEM;
3750}
3751
6b7c5b94
SP
3752static void be_ctrl_cleanup(struct be_adapter *adapter)
3753{
8788fdc2 3754 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3755
3756 be_unmap_pci_bars(adapter);
3757
3758 if (mem->va)
2b7bcebf
IV
3759 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3760 mem->dma);
e7b909a6 3761
5b8821b7 3762 mem = &adapter->rx_filter;
e7b909a6 3763 if (mem->va)
2b7bcebf
IV
3764 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3765 mem->dma);
6b7c5b94
SP
3766}
3767
6b7c5b94
SP
3768static int be_ctrl_init(struct be_adapter *adapter)
3769{
8788fdc2
SP
3770 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3771 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3772 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 3773 u32 sli_intf;
6b7c5b94 3774 int status;
6b7c5b94 3775
ce66f781
SP
3776 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3777 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3778 SLI_INTF_FAMILY_SHIFT;
3779 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3780
6b7c5b94
SP
3781 status = be_map_pci_bars(adapter);
3782 if (status)
e7b909a6 3783 goto done;
6b7c5b94
SP
3784
3785 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3786 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3787 mbox_mem_alloc->size,
3788 &mbox_mem_alloc->dma,
3789 GFP_KERNEL);
6b7c5b94 3790 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3791 status = -ENOMEM;
3792 goto unmap_pci_bars;
6b7c5b94
SP
3793 }
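 /* The mailbox is over-allocated by 16 bytes so that both its virtual
 * and DMA addresses can be rounded up with PTR_ALIGN(); the hardware
 * mailbox interface appears to require 16-byte alignment.
 */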
3794 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3795 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3796 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3797 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3798
5b8821b7
SP
3799 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3800 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3801 &rx_filter->dma, GFP_KERNEL);
3802 if (rx_filter->va == NULL) {
e7b909a6
SP
3803 status = -ENOMEM;
3804 goto free_mbox;
3805 }
5b8821b7 3806 memset(rx_filter->va, 0, rx_filter->size);
2984961c 3807 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3808 spin_lock_init(&adapter->mcc_lock);
3809 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3810
dd131e76 3811 init_completion(&adapter->flash_compl);
cf588477 3812 pci_save_state(adapter->pdev);
6b7c5b94 3813 return 0;
e7b909a6
SP
3814
3815free_mbox:
2b7bcebf
IV
3816 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3817 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3818
3819unmap_pci_bars:
3820 be_unmap_pci_bars(adapter);
3821
3822done:
3823 return status;
6b7c5b94
SP
3824}
3825
3826static void be_stats_cleanup(struct be_adapter *adapter)
3827{
3abcdeda 3828 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3829
3830 if (cmd->va)
2b7bcebf
IV
3831 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3832 cmd->va, cmd->dma);
6b7c5b94
SP
3833}
3834
3835static int be_stats_init(struct be_adapter *adapter)
3836{
3abcdeda 3837 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3838
ca34fe38
SP
3839 if (lancer_chip(adapter))
3840 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3841 else if (BE2_chip(adapter))
89a88ab8 3842 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3843 else
3844 /* BE3 and Skyhawk */
3845 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3846
2b7bcebf
IV
3847 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3848 GFP_KERNEL);
6b7c5b94
SP
3849 if (cmd->va == NULL)
3850 return -1;
d291b9af 3851 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3852 return 0;
3853}
3854
3bc6b06c 3855static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
3856{
3857 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3858
6b7c5b94
SP
3859 if (!adapter)
3860 return;
3861
045508a8 3862 be_roce_dev_remove(adapter);
8cef7a78 3863 be_intr_set(adapter, false);
045508a8 3864
f67ef7ba
PR
3865 cancel_delayed_work_sync(&adapter->func_recovery_work);
3866
6b7c5b94
SP
3867 unregister_netdev(adapter->netdev);
3868
5fb379ee
SP
3869 be_clear(adapter);
3870
bf99e50d
PR
3871 /* tell fw we're done with firing cmds */
3872 be_cmd_fw_clean(adapter);
3873
6b7c5b94
SP
3874 be_stats_cleanup(adapter);
3875
3876 be_ctrl_cleanup(adapter);
3877
d6b6d987
SP
3878 pci_disable_pcie_error_reporting(pdev);
3879
6b7c5b94
SP
3880 pci_set_drvdata(pdev, NULL);
3881 pci_release_regions(pdev);
3882 pci_disable_device(pdev);
3883
3884 free_netdev(adapter->netdev);
3885}
3886
4762f6ce
AK
3887bool be_is_wol_supported(struct be_adapter *adapter)
3888{
3889 return (adapter->wol_cap & BE_WOL_CAP) &&
3890 !be_is_wol_excluded(adapter);
3891}
3892
941a77d5
SK
3893u32 be_get_fw_log_level(struct be_adapter *adapter)
3894{
3895 struct be_dma_mem extfat_cmd;
3896 struct be_fat_conf_params *cfgs;
3897 int status;
3898 u32 level = 0;
3899 int j;
3900
f25b119c
PR
3901 if (lancer_chip(adapter))
3902 return 0;
3903
941a77d5
SK
3904 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3905 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3906 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3907 &extfat_cmd.dma);
3908
3909 if (!extfat_cmd.va) {
3910 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3911 __func__);
3912 goto err;
3913 }
3914
3915 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3916 if (!status) {
3917 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3918 sizeof(struct be_cmd_resp_hdr));
ac46a462 3919 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
3920 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3921 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3922 }
3923 }
3924 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3925 extfat_cmd.dma);
3926err:
3927 return level;
3928}
abb93951 3929
39f1d94d 3930static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 3931{
6b7c5b94 3932 int status;
941a77d5 3933 u32 level;
6b7c5b94 3934
9e1453c5
AK
3935 status = be_cmd_get_cntl_attributes(adapter);
3936 if (status)
3937 return status;
3938
4762f6ce
AK
3939 status = be_cmd_get_acpi_wol_cap(adapter);
3940 if (status) {
3941 /* in case of a failure to get wol capabilities
3942 * check the exclusion list to determine WOL capability */
3943 if (!be_is_wol_excluded(adapter))
3944 adapter->wol_cap |= BE_WOL_CAP;
3945 }
3946
3947 if (be_is_wol_supported(adapter))
3948 adapter->wol = true;
3949
7aeb2156
PR
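/* The worker below runs once a second, so a be_get_temp_freq of 64
 * means the die temperature is refreshed roughly once a minute.
 */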
3950 /* Must be a power of 2 or else MODULO will BUG_ON */
3951 adapter->be_get_temp_freq = 64;
3952
941a77d5
SK
3953 level = be_get_fw_log_level(adapter);
3954 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3955
2243e2e9 3956 return 0;
6b7c5b94
SP
3957}
3958
f67ef7ba 3959static int lancer_recover_func(struct be_adapter *adapter)
d8110f62
PR
3960{
3961 int status;
d8110f62 3962
f67ef7ba
PR
3963 status = lancer_test_and_set_rdy_state(adapter);
3964 if (status)
3965 goto err;
d8110f62 3966
f67ef7ba
PR
3967 if (netif_running(adapter->netdev))
3968 be_close(adapter->netdev);
d8110f62 3969
f67ef7ba
PR
3970 be_clear(adapter);
3971
3972 adapter->hw_error = false;
3973 adapter->fw_timeout = false;
3974
3975 status = be_setup(adapter);
3976 if (status)
3977 goto err;
d8110f62 3978
f67ef7ba
PR
3979 if (netif_running(adapter->netdev)) {
3980 status = be_open(adapter->netdev);
d8110f62
PR
3981 if (status)
3982 goto err;
f67ef7ba 3983 }
d8110f62 3984
f67ef7ba
PR
3985 dev_info(&adapter->pdev->dev,
3986 "Adapter SLIPORT recovery succeeded\n");
3987 return 0;
3988err:
67297ad8
PR
3989 if (adapter->eeh_error)
3990 dev_err(&adapter->pdev->dev,
3991 "Adapter SLIPORT recovery failed\n");
d8110f62 3992
f67ef7ba
PR
3993 return status;
3994}
3995
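/* be_func_recovery_task() reschedules itself every second: it first
 * looks for new errors, and on a Lancer SLIPORT error it detaches the
 * netdev and runs the close -> clear -> setup -> open sequence above to
 * bring the function back without a driver reload.
 */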
3996static void be_func_recovery_task(struct work_struct *work)
3997{
3998 struct be_adapter *adapter =
3999 container_of(work, struct be_adapter, func_recovery_work.work);
4000 int status;
d8110f62 4001
f67ef7ba 4002 be_detect_error(adapter);
d8110f62 4003
f67ef7ba 4004 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4005
f67ef7ba
PR
4006 if (adapter->eeh_error)
4007 goto out;
d8110f62 4008
f67ef7ba
PR
4009 rtnl_lock();
4010 netif_device_detach(adapter->netdev);
4011 rtnl_unlock();
d8110f62 4012
f67ef7ba 4013 status = lancer_recover_func(adapter);
d8110f62 4014
f67ef7ba
PR
4015 if (!status)
4016 netif_device_attach(adapter->netdev);
d8110f62 4017 }
f67ef7ba
PR
4018
4019out:
4020 schedule_delayed_work(&adapter->func_recovery_work,
4021 msecs_to_jiffies(1000));
d8110f62
PR
4022}
4023
4024static void be_worker(struct work_struct *work)
4025{
4026 struct be_adapter *adapter =
4027 container_of(work, struct be_adapter, work.work);
4028 struct be_rx_obj *rxo;
10ef9ab4 4029 struct be_eq_obj *eqo;
d8110f62
PR
4030 int i;
4031
d8110f62
PR
4032 /* when interrupts are not yet enabled, just reap any pending
4033 * mcc completions */
4034 if (!netif_running(adapter->netdev)) {
072a9c48 4035 local_bh_disable();
10ef9ab4 4036 be_process_mcc(adapter);
072a9c48 4037 local_bh_enable();
d8110f62
PR
4038 goto reschedule;
4039 }
4040
4041 if (!adapter->stats_cmd_sent) {
4042 if (lancer_chip(adapter))
4043 lancer_cmd_get_pport_stats(adapter,
4044 &adapter->stats_cmd);
4045 else
4046 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4047 }
4048
7aeb2156
PR
4049 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4050 be_cmd_get_die_temperature(adapter);
4051
d8110f62 4052 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
4053 if (rxo->rx_post_starved) {
4054 rxo->rx_post_starved = false;
4055 be_post_rx_frags(rxo, GFP_KERNEL);
4056 }
4057 }
4058
10ef9ab4
SP
4059 for_all_evt_queues(adapter, eqo, i)
4060 be_eqd_update(adapter, eqo);
4061
d8110f62
PR
4062reschedule:
4063 adapter->work_counter++;
4064 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4065}
4066
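/* Skip the function reset at probe when VFs are already enabled:
 * presumably an FLR at that point would tear down VFs provisioned by a
 * previous driver load.
 */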
39f1d94d
SP
4067static bool be_reset_required(struct be_adapter *adapter)
4068{
d79c0a20 4069 return be_find_vfs(adapter, ENABLED) <= 0;
39f1d94d
SP
4070}
4071
d379142b
SP
4072static char *mc_name(struct be_adapter *adapter)
4073{
4074 if (adapter->function_mode & FLEX10_MODE)
4075 return "FLEX10";
4076 else if (adapter->function_mode & VNIC_MODE)
4077 return "vNIC";
4078 else if (adapter->function_mode & UMC_ENABLED)
4079 return "UMC";
4080 else
4081 return "";
4082}
4083
4084static inline char *func_name(struct be_adapter *adapter)
4085{
4086 return be_physfn(adapter) ? "PF" : "VF";
4087}
4088
1dd06ae8 4089static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4090{
4091 int status = 0;
4092 struct be_adapter *adapter;
4093 struct net_device *netdev;
b4e32a71 4094 char port_name;
6b7c5b94
SP
4095
4096 status = pci_enable_device(pdev);
4097 if (status)
4098 goto do_none;
4099
4100 status = pci_request_regions(pdev, DRV_NAME);
4101 if (status)
4102 goto disable_dev;
4103 pci_set_master(pdev);
4104
7f640062 4105 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4106 if (netdev == NULL) {
4107 status = -ENOMEM;
4108 goto rel_reg;
4109 }
4110 adapter = netdev_priv(netdev);
4111 adapter->pdev = pdev;
4112 pci_set_drvdata(pdev, adapter);
4113 adapter->netdev = netdev;
2243e2e9 4114 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4115
2b7bcebf 4116 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4117 if (!status) {
4118 netdev->features |= NETIF_F_HIGHDMA;
4119 } else {
2b7bcebf 4120 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4121 if (status) {
4122 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4123 goto free_netdev;
4124 }
4125 }
4126
d6b6d987
SP
4127 status = pci_enable_pcie_error_reporting(pdev);
4128 if (status)
4129 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4130
6b7c5b94
SP
4131 status = be_ctrl_init(adapter);
4132 if (status)
39f1d94d 4133 goto free_netdev;
6b7c5b94 4134
2243e2e9 4135 /* sync up with fw's ready state */
ba343c77 4136 if (be_physfn(adapter)) {
bf99e50d 4137 status = be_fw_wait_ready(adapter);
ba343c77
SB
4138 if (status)
4139 goto ctrl_clean;
ba343c77 4140 }
6b7c5b94 4141
2243e2e9
SP
4142 /* tell fw we're ready to fire cmds */
4143 status = be_cmd_fw_init(adapter);
6b7c5b94 4144 if (status)
2243e2e9
SP
4145 goto ctrl_clean;
4146
39f1d94d
SP
4147 if (be_reset_required(adapter)) {
4148 status = be_cmd_reset_function(adapter);
4149 if (status)
4150 goto ctrl_clean;
4151 }
556ae191 4152
8cef7a78
SK
4153 /* Wait for interrupts to quiesce after an FLR */
4154 msleep(100);
4155
4156 /* Allow interrupts for other ULPs running on NIC function */
4157 be_intr_set(adapter, true);
10ef9ab4 4158
2243e2e9
SP
4159 status = be_stats_init(adapter);
4160 if (status)
4161 goto ctrl_clean;
4162
39f1d94d 4163 status = be_get_initial_config(adapter);
6b7c5b94
SP
4164 if (status)
4165 goto stats_clean;
6b7c5b94
SP
4166
4167 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4168 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4169 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4170
5fb379ee
SP
4171 status = be_setup(adapter);
4172 if (status)
55f5c3c5 4173 goto stats_clean;
2243e2e9 4174
3abcdeda 4175 be_netdev_init(netdev);
6b7c5b94
SP
4176 status = register_netdev(netdev);
4177 if (status != 0)
5fb379ee 4178 goto unsetup;
6b7c5b94 4179
045508a8
PP
4180 be_roce_dev_add(adapter);
4181
f67ef7ba
PR
4182 schedule_delayed_work(&adapter->func_recovery_work,
4183 msecs_to_jiffies(1000));
b4e32a71
PR
4184
4185 be_cmd_query_port_name(adapter, &port_name);
4186
d379142b
SP
4187 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4188 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4189
6b7c5b94
SP
4190 return 0;
4191
5fb379ee
SP
4192unsetup:
4193 be_clear(adapter);
6b7c5b94
SP
4194stats_clean:
4195 be_stats_cleanup(adapter);
4196ctrl_clean:
4197 be_ctrl_cleanup(adapter);
f9449ab7 4198free_netdev:
fe6d2a38 4199 free_netdev(netdev);
8d56ff11 4200 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4201rel_reg:
4202 pci_release_regions(pdev);
4203disable_dev:
4204 pci_disable_device(pdev);
4205do_none:
c4ca2374 4206 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4207 return status;
4208}
4209
4210static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4211{
4212 struct be_adapter *adapter = pci_get_drvdata(pdev);
4213 struct net_device *netdev = adapter->netdev;
4214
71d8d1b5
AK
4215 if (adapter->wol)
4216 be_setup_wol(adapter, true);
4217
f67ef7ba
PR
4218 cancel_delayed_work_sync(&adapter->func_recovery_work);
4219
6b7c5b94
SP
4220 netif_device_detach(netdev);
4221 if (netif_running(netdev)) {
4222 rtnl_lock();
4223 be_close(netdev);
4224 rtnl_unlock();
4225 }
9b0365f1 4226 be_clear(adapter);
6b7c5b94
SP
4227
4228 pci_save_state(pdev);
4229 pci_disable_device(pdev);
4230 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4231 return 0;
4232}
4233
4234static int be_resume(struct pci_dev *pdev)
4235{
4236 int status = 0;
4237 struct be_adapter *adapter = pci_get_drvdata(pdev);
4238 struct net_device *netdev = adapter->netdev;
4239
4240 netif_device_detach(netdev);
4241
4242 status = pci_enable_device(pdev);
4243 if (status)
4244 return status;
4245
4246 pci_set_power_state(pdev, PCI_D0);
4247 pci_restore_state(pdev);
4248
2243e2e9
SP
4249 /* tell fw we're ready to fire cmds */
4250 status = be_cmd_fw_init(adapter);
4251 if (status)
4252 return status;
4253
9b0365f1 4254 be_setup(adapter);
6b7c5b94
SP
4255 if (netif_running(netdev)) {
4256 rtnl_lock();
4257 be_open(netdev);
4258 rtnl_unlock();
4259 }
f67ef7ba
PR
4260
4261 schedule_delayed_work(&adapter->func_recovery_work,
4262 msecs_to_jiffies(1000));
6b7c5b94 4263 netif_device_attach(netdev);
71d8d1b5
AK
4264
4265 if (adapter->wol)
4266 be_setup_wol(adapter, false);
a4ca055f 4267
6b7c5b94
SP
4268 return 0;
4269}
4270
82456b03
SP
4271/*
4272 * An FLR will stop BE from DMAing any data.
4273 */
4274static void be_shutdown(struct pci_dev *pdev)
4275{
4276 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4277
2d5d4154
AK
4278 if (!adapter)
4279 return;
82456b03 4280
0f4a6828 4281 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4282 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4283
2d5d4154 4284 netif_device_detach(adapter->netdev);
82456b03 4285
57841869
AK
4286 be_cmd_reset_function(adapter);
4287
82456b03 4288 pci_disable_device(pdev);
82456b03
SP
4289}
4290
cf588477
SP
4291static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4292 pci_channel_state_t state)
4293{
4294 struct be_adapter *adapter = pci_get_drvdata(pdev);
4295 struct net_device *netdev = adapter->netdev;
4296
4297 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4298
f67ef7ba
PR
4299 adapter->eeh_error = true;
4300
4301 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4302
f67ef7ba 4303 rtnl_lock();
cf588477 4304 netif_device_detach(netdev);
f67ef7ba 4305 rtnl_unlock();
cf588477
SP
4306
4307 if (netif_running(netdev)) {
4308 rtnl_lock();
4309 be_close(netdev);
4310 rtnl_unlock();
4311 }
4312 be_clear(adapter);
4313
4314 if (state == pci_channel_io_perm_failure)
4315 return PCI_ERS_RESULT_DISCONNECT;
4316
4317 pci_disable_device(pdev);
4318
eeb7fc7b
SK
4319 /* The error could cause the FW to trigger a flash debug dump.
4320 * Resetting the card while flash dump is in progress
c8a54163
PR
4321 * can cause it not to recover; wait for it to finish.
4322 * Wait only for first function as it is needed only once per
4323 * adapter.
eeb7fc7b 4324 */
c8a54163
PR
4325 if (pdev->devfn == 0)
4326 ssleep(30);
4327
cf588477
SP
4328 return PCI_ERS_RESULT_NEED_RESET;
4329}
4330
4331static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4332{
4333 struct be_adapter *adapter = pci_get_drvdata(pdev);
4334 int status;
4335
4336 dev_info(&adapter->pdev->dev, "EEH reset\n");
f67ef7ba 4337 be_clear_all_error(adapter);
cf588477
SP
4338
4339 status = pci_enable_device(pdev);
4340 if (status)
4341 return PCI_ERS_RESULT_DISCONNECT;
4342
4343 pci_set_master(pdev);
4344 pci_set_power_state(pdev, PCI_D0);
4345 pci_restore_state(pdev);
4346
4347 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4348 dev_info(&adapter->pdev->dev,
4349 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4350 status = be_fw_wait_ready(adapter);
cf588477
SP
4351 if (status)
4352 return PCI_ERS_RESULT_DISCONNECT;
4353
d6b6d987 4354 pci_cleanup_aer_uncorrect_error_status(pdev);
cf588477
SP
4355 return PCI_ERS_RESULT_RECOVERED;
4356}
4357
4358static void be_eeh_resume(struct pci_dev *pdev)
4359{
4360 int status = 0;
4361 struct be_adapter *adapter = pci_get_drvdata(pdev);
4362 struct net_device *netdev = adapter->netdev;
4363
4364 dev_info(&adapter->pdev->dev, "EEH resume\n");
4365
4366 pci_save_state(pdev);
4367
4368 /* tell fw we're ready to fire cmds */
4369 status = be_cmd_fw_init(adapter);
4370 if (status)
4371 goto err;
4372
bf99e50d
PR
4373 status = be_cmd_reset_function(adapter);
4374 if (status)
4375 goto err;
4376
cf588477
SP
4377 status = be_setup(adapter);
4378 if (status)
4379 goto err;
4380
4381 if (netif_running(netdev)) {
4382 status = be_open(netdev);
4383 if (status)
4384 goto err;
4385 }
f67ef7ba
PR
4386
4387 schedule_delayed_work(&adapter->func_recovery_work,
4388 msecs_to_jiffies(1000));
cf588477
SP
4389 netif_device_attach(netdev);
4390 return;
4391err:
4392 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4393}
4394
3646f0e5 4395static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4396 .error_detected = be_eeh_err_detected,
4397 .slot_reset = be_eeh_reset,
4398 .resume = be_eeh_resume,
4399};
4400
6b7c5b94
SP
4401static struct pci_driver be_driver = {
4402 .name = DRV_NAME,
4403 .id_table = be_dev_ids,
4404 .probe = be_probe,
4405 .remove = be_remove,
4406 .suspend = be_suspend,
cf588477 4407 .resume = be_resume,
82456b03 4408 .shutdown = be_shutdown,
cf588477 4409 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4410};
4411
4412static int __init be_init_module(void)
4413{
8e95a202
JP
4414 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4415 rx_frag_size != 2048) {
6b7c5b94
SP
4416 printk(KERN_WARNING DRV_NAME
4417 " : Module param rx_frag_size must be 2048/4096/8192."
4418 " Using 2048\n");
4419 rx_frag_size = 2048;
4420 }
6b7c5b94
SP
4421
4422 return pci_register_driver(&be_driver);
4423}
4424module_init(be_init_module);
4425
4426static void __exit be_exit_module(void)
4427{
4428 pci_unregister_driver(&be_driver);
4429}
4430module_exit(be_exit_module);