be2net: remove return statements for void functions
linux-2.6-block.git: drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
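
/* Human-readable names for the bits of the Unrecoverable Error (UE)
 * status low/high CSRs, indexed by bit position; used when reporting
 * which functional block flagged a UE.
 */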
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

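/* Doorbell helpers: each encodes a queue id and a count into a 32-bit
 * value and writes it to the corresponding doorbell register. For the
 * RX and TX producer doorbells, the wmb() ensures the queue entries are
 * visible in memory before the device is told about them.
 */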
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

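/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * accumulator: if the new reading is below the accumulator's low 16
 * bits, the HW counter wrapped since the last read, so add 65536
 * before folding in the new value.
 */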
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

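/* Per-queue SW stats are read under u64_stats sync retry loops so that
 * the 64-bit counters are sampled consistently even on 32-bit hosts.
 */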
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

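/* Each TX request consumes one WRB per DMA-mapped piece (the linear
 * head, if any, plus one per page fragment) and one header WRB. On BE
 * chips the total must be even, so an all-zero dummy WRB is appended
 * when needed; Lancer has no such restriction.
 */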
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}

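/* The WRB was converted to LE before posting; convert it back to CPU
 * endianness to recover the DMA address and length recorded by
 * wrb_fill() so the mapping can be undone.
 */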
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}

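/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself for the cases where HW tagging must be avoided;
 * *skip_hw_vlan is set so the caller programs the Tx WRB accordingly.
 */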
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
		    txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

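/* Resolve the RX filter mode in order of precedence: interface
 * promiscuous mode first, then the unicast MAC list (falling back to
 * promisc if it exceeds what the HW supports), then multicast
 * filtering (falling back to multicast-promisc on overflow or failure).
 */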
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

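/* Adaptive interrupt coalescing: derive a new EQ delay from the RX+TX
 * packet rate observed since the last run (eqd = (pps / 15000) * 4,
 * zeroed below a small threshold and clamped to [min_eqd, max_eqd])
 * and push the changed values to the FW in one batched command.
 */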
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

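/* Consume the page_info at the RXQ tail. A fragment marked last_frag
 * owns the DMA mapping of the whole compound page and is unmapped;
 * earlier fragments of the same page are only synced for CPU access.
 */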
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

1583/*
1584 * skb_fill_rx_data forms a complete skb for an ether frame
1585 * indicated by rxcp.
1586 */
10ef9ab4
SP
1587static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1588 struct be_rx_compl_info *rxcp)
6b7c5b94 1589{
6b7c5b94 1590 struct be_rx_page_info *page_info;
2e588f84
SP
1591 u16 i, j;
1592 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1593 u8 *start;
6b7c5b94 1594
0b0ef1d0 1595 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1596 start = page_address(page_info->page) + page_info->page_offset;
1597 prefetch(start);
1598
1599 /* Copy data in the first descriptor of this completion */
2e588f84 1600 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1601
6b7c5b94
SP
1602 skb->len = curr_frag_len;
1603 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1604 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1605 /* Complete packet has now been moved to data */
1606 put_page(page_info->page);
1607 skb->data_len = 0;
1608 skb->tail += curr_frag_len;
1609 } else {
ac1ae5f3
ED
1610 hdr_len = ETH_HLEN;
1611 memcpy(skb->data, start, hdr_len);
6b7c5b94 1612 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1613 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1614 skb_shinfo(skb)->frags[0].page_offset =
1615 page_info->page_offset + hdr_len;
748b539a
SP
1616 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1617 curr_frag_len - hdr_len);
6b7c5b94 1618 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1619 skb->truesize += rx_frag_size;
6b7c5b94
SP
1620 skb->tail += hdr_len;
1621 }
205859a2 1622 page_info->page = NULL;
6b7c5b94 1623
2e588f84
SP
1624 if (rxcp->pkt_size <= rx_frag_size) {
1625 BUG_ON(rxcp->num_rcvd != 1);
1626 return;
6b7c5b94
SP
1627 }
1628
1629 /* More frags present for this completion */
2e588f84
SP
1630 remaining = rxcp->pkt_size - curr_frag_len;
1631 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1632 page_info = get_rx_page_info(rxo);
2e588f84 1633 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1634
bd46cb6c
AK
1635 /* Coalesce all frags from the same physical page in one slot */
1636 if (page_info->page_offset == 0) {
1637 /* Fresh page */
1638 j++;
b061b39e 1639 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1640 skb_shinfo(skb)->frags[j].page_offset =
1641 page_info->page_offset;
9e903e08 1642 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1643 skb_shinfo(skb)->nr_frags++;
1644 } else {
1645 put_page(page_info->page);
1646 }
1647
9e903e08 1648 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1649 skb->len += curr_frag_len;
1650 skb->data_len += curr_frag_len;
bdb28a97 1651 skb->truesize += rx_frag_size;
2e588f84 1652 remaining -= curr_frag_len;
205859a2 1653 page_info->page = NULL;
6b7c5b94 1654 }
bd46cb6c 1655 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1656}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

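/* The RX completion descriptor has two layouts: v1 is used when the
 * adapter runs in BE3-native mode, v0 otherwise. Both parsers below fill
 * the chip-agnostic be_rx_compl_info, keeping the rest of the RX path
 * independent of the descriptor layout.
 */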
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

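/* Order > 0 allocations ask for a compound page so that the page can be
 * refcounted (get_page()/put_page()) as a single unit across its frags.
 */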
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

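/* For example, with 4K pages and the default rx_frag_size of 2048,
 * get_order(2048) == 0 and big_page_size works out to one 4K page
 * carrying two 2K receive fragments.
 */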
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

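/* A TX request occupies one header WRB plus one WRB per DMA'd fragment;
 * the completion carries only the index of the last WRB, so everything
 * from the queue tail up to that index is unmapped and freed here.
 */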
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}

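/* INTx ISR: used only when MSI-X could not be enabled; in that mode all
 * events are delivered via EQ0.
 */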
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

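/* Reaps up to "budget" RX completions from this queue. The "polling"
 * argument distinguishes NAPI from busy-poll context; GRO is skipped
 * while busy-polling.
 */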
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}

static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

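/* A UE (unrecoverable error) bit is only meaningful when it is not
 * masked off in the corresponding UE-mask register; hence the
 * (ue & ~ue_mask) filtering below.
 */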
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
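
		/* Example: with 3 RSS rings the 128-entry indirection table
		 * is filled round-robin (ring0, ring1, ring2, ring0, ...) so
		 * that hashed flows spread evenly across the rings.
		 */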
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

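/* Programs (or clears) the magic-packet filter in FW and toggles PCI
 * wake for D3hot/D3cold to match; passing an all-zero MAC disables the
 * filter.
 */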
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

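/* Used when the PF is probed over VFs that are already enabled: instead
 * of generating fresh MACs, read back each VF's currently active MAC.
 */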
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

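/* Creates one FW interface per VF; the capability flags are narrowed to
 * whatever the per-VF FW profile reports, when such a profile exists.
 */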
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}
3297
92bf14ab
SP
3298/* On BE2/BE3 FW does not suggest the supported limits */
3299static void BEx_get_resources(struct be_adapter *adapter,
3300 struct be_resources *res)
3301{
bec84e6b 3302 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3303
3304 if (be_physfn(adapter))
3305 res->max_uc_mac = BE_UC_PMAC_COUNT;
3306 else
3307 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3308
f93f160b
VV
3309 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3310
3311 if (be_is_mc(adapter)) {
3312 /* Assuming that there are 4 channels per port
3313 * when multi-channel is enabled
3314 */
3315 if (be_is_qnq_mode(adapter))
3316 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3317 else
3318 /* In a non-qnq multichannel mode, the pvid
3319 * takes up one vlan entry
3320 */
3321 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3322 } else {
92bf14ab 3323 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3324 }
3325
92bf14ab
SP
3326 res->max_mcast_mac = BE_MAX_MC;
3327
a5243dab
VV
3328 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3329 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3330 * *only* if it is RSS-capable.
3331 */
3332 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3333 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3334 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3335 res->max_tx_qs = 1;
a28277dc
SR
3336 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3337 struct be_resources super_nic_res = {0};
3338
3339 /* On a SuperNIC profile, the driver needs to use the
3340 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3341 */
3342 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3343 /* Some old versions of BE3 FW don't report max_tx_qs value */
3344 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3345 } else {
92bf14ab 3346 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3347 }
92bf14ab
SP
3348
3349 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3350 !use_sriov && be_physfn(adapter))
3351 res->max_rss_qs = (adapter->be3_native) ?
3352 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3353 res->max_rx_qs = res->max_rss_qs + 1;
3354
e3dc867c 3355 if (be_physfn(adapter))
d3518e21 3356 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3357 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3358 else
3359 res->max_evt_qs = 1;
92bf14ab
SP
3360
3361 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3362 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3363 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3364}
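/* Worked example (sketch; assumes BE_NUM_VLANS_SUPPORTED is 64): the
 * multi-channel VLAN budget above then comes out as
 *	QnQ mode:      64 / 8      = 8 filters
 *	non-QnQ mode: (64 / 4) - 1 = 15 filters (one entry lost to the pvid)
 */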
3365
30128031
SP
3366static void be_setup_init(struct be_adapter *adapter)
3367{
3368 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3369 adapter->phy.link_speed = -1;
30128031
SP
3370 adapter->if_handle = -1;
3371 adapter->be3_native = false;
3372 adapter->promiscuous = false;
f25b119c
PR
3373 if (be_physfn(adapter))
3374 adapter->cmd_privileges = MAX_PRIVILEGES;
3375 else
3376 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3377}
3378
bec84e6b
VV
3379static int be_get_sriov_config(struct be_adapter *adapter)
3380{
3381 struct device *dev = &adapter->pdev->dev;
3382 struct be_resources res = {0};
d3d18312 3383 int max_vfs, old_vfs;
bec84e6b
VV
3384
3385 /* Some old versions of BE3 FW don't report max_vfs value */
d3d18312
SP
3386 be_cmd_get_profile_config(adapter, &res, 0);
3387
bec84e6b
VV
3388 if (BE3_chip(adapter) && !res.max_vfs) {
3389 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3390 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3391 }
3392
d3d18312 3393 adapter->pool_res = res;
bec84e6b
VV
3394
3395 if (!be_max_vfs(adapter)) {
3396 if (num_vfs)
50762667 3397 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
bec84e6b
VV
3398 adapter->num_vfs = 0;
3399 return 0;
3400 }
3401
d3d18312
SP
3402 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3403
bec84e6b
VV
3404 /* validate num_vfs module param */
3405 old_vfs = pci_num_vf(adapter->pdev);
3406 if (old_vfs) {
3407 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3408 if (old_vfs != num_vfs)
3409 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3410 adapter->num_vfs = old_vfs;
3411 } else {
3412 if (num_vfs > be_max_vfs(adapter)) {
3413 dev_info(dev, "Resources unavailable to init %d VFs\n",
3414 num_vfs);
3415 dev_info(dev, "Limiting to %d VFs\n",
3416 be_max_vfs(adapter));
3417 }
3418 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3419 }
3420
3421 return 0;
3422}
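/* Sketch (hypothetical helper, distilling the validation above): VFs that
 * were already enabled before the driver loaded win; otherwise the request
 * is clamped to the pool maximum.
 */
#if 0	/* example only */
static u16 example_clamp_num_vfs(u16 requested, u16 max_vfs, u16 old_vfs)
{
	return old_vfs ? old_vfs : min_t(u16, requested, max_vfs);
}
#endif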
3423
92bf14ab 3424static int be_get_resources(struct be_adapter *adapter)
abb93951 3425{
92bf14ab
SP
3426 struct device *dev = &adapter->pdev->dev;
3427 struct be_resources res = {0};
3428 int status;
abb93951 3429
92bf14ab
SP
3430 if (BEx_chip(adapter)) {
3431 BEx_get_resources(adapter, &res);
3432 adapter->res = res;
abb93951
PR
3433 }
3434
92bf14ab
SP
3435 /* For Lancer, SH etc., read per-function resource limits from FW.
3436 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3437 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3438 */
3439 if (!BEx_chip(adapter)) {
3440 status = be_cmd_get_func_config(adapter, &res);
3441 if (status)
3442 return status;
abb93951 3443
92bf14ab
SP
3444 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3445 if (be_roce_supported(adapter))
3446 res.max_evt_qs /= 2;
3447 adapter->res = res;
abb93951 3448 }
4c876616 3449
acbafeb1
SP
3450 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3451 be_max_txqs(adapter), be_max_rxqs(adapter),
3452 be_max_rss(adapter), be_max_eqs(adapter),
3453 be_max_vfs(adapter));
3454 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3455 be_max_uc(adapter), be_max_mc(adapter),
3456 be_max_vlans(adapter));
3457
92bf14ab 3458 return 0;
abb93951
PR
3459}
3460
d3d18312
SP
3461static void be_sriov_config(struct be_adapter *adapter)
3462{
3463 struct device *dev = &adapter->pdev->dev;
3464 int status;
3465
3466 status = be_get_sriov_config(adapter);
3467 if (status) {
3468 dev_err(dev, "Failed to query SR-IOV configuration\n");
3469 dev_err(dev, "SR-IOV cannot be enabled\n");
3470 return;
3471 }
3472
3473 /* When the HW is in SRIOV capable configuration, the PF-pool
3474 * resources are equally distributed across the max-number of
3475 * VFs. The user may request only a subset of the max-vfs to be
3476 * enabled. Based on num_vfs, redistribute the resources across
3477 * num_vfs so that each VF will have access to more
3478 * resources. This facility is not available in BE3 FW.
3479 * Also, on Lancer this is done by the FW.
3480 */
3481 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3482 status = be_cmd_set_sriov_config(adapter,
3483 adapter->pool_res,
3484 adapter->num_vfs);
3485 if (status)
3486 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3487 }
3488}
3489
39f1d94d
SP
3490static int be_get_config(struct be_adapter *adapter)
3491{
542963b7 3492 u16 profile_id;
4c876616 3493 int status;
39f1d94d 3494
e97e3cda 3495 status = be_cmd_query_fw_cfg(adapter);
abb93951 3496 if (status)
92bf14ab 3497 return status;
abb93951 3498
542963b7
VV
3499 if (be_physfn(adapter)) {
3500 status = be_cmd_get_active_profile(adapter, &profile_id);
3501 if (!status)
3502 dev_info(&adapter->pdev->dev,
3503 "Using profile 0x%x\n", profile_id);
962bcb75 3504 }
bec84e6b 3505
d3d18312
SP
3506 if (!BE2_chip(adapter) && be_physfn(adapter))
3507 be_sriov_config(adapter);
542963b7 3508
92bf14ab
SP
3509 status = be_get_resources(adapter);
3510 if (status)
3511 return status;
abb93951 3512
46ee9c14
RN
3513 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3514 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3515 if (!adapter->pmac_id)
3516 return -ENOMEM;
abb93951 3517
92bf14ab
SP
3518 /* Sanitize cfg_num_qs based on HW and platform limits */
3519 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3520
3521 return 0;
39f1d94d
SP
3522}
3523
95046b92
SP
3524static int be_mac_setup(struct be_adapter *adapter)
3525{
3526 u8 mac[ETH_ALEN];
3527 int status;
3528
3529 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3530 status = be_cmd_get_perm_mac(adapter, mac);
3531 if (status)
3532 return status;
3533
3534 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3535 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3536 } else {
3537 /* Maybe the HW was reset; dev_addr must be re-programmed */
3538 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3539 }
3540
2c7a9dc1
AK
3541 /* For BE3-R VFs, the PF programs the initial MAC address */
3542 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3543 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
95046b92
SP
3545 return 0;
3546}
3547
68d7bdcb
SP
3548static void be_schedule_worker(struct be_adapter *adapter)
3549{
3550 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3551 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3552}
3553
7707133c 3554static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3555{
68d7bdcb 3556 struct net_device *netdev = adapter->netdev;
10ef9ab4 3557 int status;
ba343c77 3558
7707133c 3559 status = be_evt_queues_create(adapter);
abb93951
PR
3560 if (status)
3561 goto err;
73d540f2 3562
7707133c 3563 status = be_tx_qs_create(adapter);
c2bba3df
SK
3564 if (status)
3565 goto err;
10ef9ab4 3566
7707133c 3567 status = be_rx_cqs_create(adapter);
10ef9ab4 3568 if (status)
a54769f5 3569 goto err;
6b7c5b94 3570
7707133c 3571 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3572 if (status)
3573 goto err;
3574
68d7bdcb
SP
3575 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3576 if (status)
3577 goto err;
3578
3579 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3580 if (status)
3581 goto err;
3582
7707133c
SP
3583 return 0;
3584err:
3585 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3586 return status;
3587}
3588
68d7bdcb
SP
3589int be_update_queues(struct be_adapter *adapter)
3590{
3591 struct net_device *netdev = adapter->netdev;
3592 int status;
3593
3594 if (netif_running(netdev))
3595 be_close(netdev);
3596
3597 be_cancel_worker(adapter);
3598
3599 /* If any vectors have been shared with RoCE we cannot re-program
3600 * the MSIx table.
3601 */
3602 if (!adapter->num_msix_roce_vec)
3603 be_msix_disable(adapter);
3604
3605 be_clear_queues(adapter);
3606
3607 if (!msix_enabled(adapter)) {
3608 status = be_msix_enable(adapter);
3609 if (status)
3610 return status;
3611 }
3612
3613 status = be_setup_queues(adapter);
3614 if (status)
3615 return status;
3616
3617 be_schedule_worker(adapter);
3618
3619 if (netif_running(netdev))
3620 status = be_open(netdev);
3621
3622 return status;
3623}
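/* Usage sketch (assumed caller pattern, e.g. an ethtool channel change):
 * a queue-count update only needs to set cfg_num_qs and let
 * be_update_queues() tear everything down and rebuild it.
 */
#if 0	/* example only */
static int example_set_channels(struct be_adapter *adapter, u16 combined)
{
	adapter->cfg_num_qs = combined;
	return be_update_queues(adapter);
}
#endif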
3624
7707133c
SP
3625static int be_setup(struct be_adapter *adapter)
3626{
3627 struct device *dev = &adapter->pdev->dev;
3628 u32 tx_fc, rx_fc, en_flags;
3629 int status;
3630
3631 be_setup_init(adapter);
3632
3633 if (!lancer_chip(adapter))
3634 be_cmd_req_native_mode(adapter);
3635
3636 status = be_get_config(adapter);
10ef9ab4 3637 if (status)
a54769f5 3638 goto err;
6b7c5b94 3639
7707133c 3640 status = be_msix_enable(adapter);
10ef9ab4 3641 if (status)
a54769f5 3642 goto err;
6b7c5b94 3643
f9449ab7 3644 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3645 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3646 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3647 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3648 en_flags = en_flags & be_if_cap_flags(adapter);
3649 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3650 &adapter->if_handle, 0);
7707133c 3651 if (status)
a54769f5 3652 goto err;
6b7c5b94 3653
68d7bdcb
SP
3654 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3655 rtnl_lock();
7707133c 3656 status = be_setup_queues(adapter);
68d7bdcb 3657 rtnl_unlock();
95046b92 3658 if (status)
1578e777
PR
3659 goto err;
3660
7707133c 3661 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3662
3663 status = be_mac_setup(adapter);
10ef9ab4
SP
3664 if (status)
3665 goto err;
3666
e97e3cda 3667 be_cmd_get_fw_ver(adapter);
acbafeb1 3668 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 3669
e9e2a904 3670 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 3671 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
3672 adapter->fw_ver);
3673 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3674 }
3675
1d1e9a46 3676 if (adapter->vlans_added)
10329df8 3677 be_vid_config(adapter);
7ab8b0b4 3678
a54769f5 3679 be_set_rx_mode(adapter->netdev);
5fb379ee 3680
76a9e08e
SR
3681 be_cmd_get_acpi_wol_cap(adapter);
3682
ddc3f5cb 3683 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3684
ddc3f5cb
AK
3685 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3686 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3687 adapter->rx_fc);
2dc1deb6 3688
bdce2ad7
SR
3689 if (be_physfn(adapter))
3690 be_cmd_set_logical_link_config(adapter,
3691 IFLA_VF_LINK_STATE_AUTO, 0);
3692
bec84e6b
VV
3693 if (adapter->num_vfs)
3694 be_vf_setup(adapter);
f9449ab7 3695
f25b119c
PR
3696 status = be_cmd_get_phy_info(adapter);
3697 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3698 adapter->phy.fc_autoneg = 1;
3699
68d7bdcb 3700 be_schedule_worker(adapter);
e1ad8e33 3701 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3702 return 0;
a54769f5
SP
3703err:
3704 be_clear(adapter);
3705 return status;
3706}
6b7c5b94 3707
66268739
IV
3708#ifdef CONFIG_NET_POLL_CONTROLLER
3709static void be_netpoll(struct net_device *netdev)
3710{
3711 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3712 struct be_eq_obj *eqo;
66268739
IV
3713 int i;
3714
e49cc34f
SP
3715 for_all_evt_queues(adapter, eqo, i) {
3716 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3717 napi_schedule(&eqo->napi);
3718 }
66268739
IV
3719}
3720#endif
3721
96c9b2e4 3722static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3723
306f1348
SP
3724static bool phy_flashing_required(struct be_adapter *adapter)
3725{
42f11cf2
AK
3726 return (adapter->phy.phy_type == TN_8022 &&
3727 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3728}
3729
c165541e
PR
3730static bool is_comp_in_ufi(struct be_adapter *adapter,
3731 struct flash_section_info *fsec, int type)
3732{
3733 int i = 0, img_type = 0;
3734 struct flash_section_info_g2 *fsec_g2 = NULL;
3735
ca34fe38 3736 if (BE2_chip(adapter))
c165541e
PR
3737 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3738
3739 for (i = 0; i < MAX_FLASH_COMP; i++) {
3740 if (fsec_g2)
3741 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3742 else
3743 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3744
3745 if (img_type == type)
3746 return true;
3747 }
3748 return false;
3749
3750}
3751
4188e7df 3752static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3753 int header_size,
3754 const struct firmware *fw)
c165541e
PR
3755{
3756 struct flash_section_info *fsec = NULL;
3757 const u8 *p = fw->data;
3758
3759 p += header_size;
3760 while (p < (fw->data + fw->size)) {
3761 fsec = (struct flash_section_info *)p;
3762 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3763 return fsec;
3764 p += 32;
3765 }
3766 return NULL;
3767}
3768
96c9b2e4
VV
3769static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3770 u32 img_offset, u32 img_size, int hdr_size,
3771 u16 img_optype, bool *crc_match)
3772{
3773 u32 crc_offset;
3774 int status;
3775 u8 crc[4];
3776
3777 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3778 if (status)
3779 return status;
3780
3781 crc_offset = hdr_size + img_offset + img_size - 4;
3782
3783 /* Skip flashing if the CRC of the flashed region matches */
3784 if (!memcmp(crc, p + crc_offset, 4))
3785 *crc_match = true;
3786 else
3787 *crc_match = false;
3788
3789 return status;
3790}
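/* Worked example (illustrative numbers): for an image at img_offset
 * 0x20000, img_size 0x10000, behind a 0x100-byte header, the CRC compared
 * above sits in the image's last 4 bytes:
 *	crc_offset = 0x100 + 0x20000 + 0x10000 - 4 = 0x300fc
 */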
3791
773a2d7c 3792static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3793 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3794{
773a2d7c 3795 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4
VV
3796 u32 total_bytes, flash_op, num_bytes;
3797 int status;
773a2d7c
PR
3798
3799 total_bytes = img_size;
3800 while (total_bytes) {
3801 num_bytes = min_t(u32, 32*1024, total_bytes);
3802
3803 total_bytes -= num_bytes;
3804
3805 if (!total_bytes) {
3806 if (optype == OPTYPE_PHY_FW)
3807 flash_op = FLASHROM_OPER_PHY_FLASH;
3808 else
3809 flash_op = FLASHROM_OPER_FLASH;
3810 } else {
3811 if (optype == OPTYPE_PHY_FW)
3812 flash_op = FLASHROM_OPER_PHY_SAVE;
3813 else
3814 flash_op = FLASHROM_OPER_SAVE;
3815 }
3816
be716446 3817 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3818 img += num_bytes;
3819 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3820 flash_op, num_bytes);
4c60005f 3821 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
3822 optype == OPTYPE_PHY_FW)
3823 break;
3824 else if (status)
773a2d7c 3825 return status;
773a2d7c
PR
3826 }
3827 return 0;
3828}
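/* Sketch (example numbers): the loop above streams the image in 32KB
 * chunks, using the SAVE op for intermediate chunks and the FLASH op to
 * commit on the final one. For a 100KB image:
 */
#if 0	/* example only */
static void example_flash_chunking(void)
{
	u32 total = 100 * 1024, chunk;

	while (total) {
		chunk = min_t(u32, 32 * 1024, total);	/* 32K, 32K, 32K, 4K */
		total -= chunk;
		/* !total selects FLASHROM_OPER_FLASH; else FLASHROM_OPER_SAVE */
	}
}
#endif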
3829
0ad3157e 3830/* For BE2, BE3 and BE3-R */
ca34fe38 3831static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
3832 const struct firmware *fw,
3833 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 3834{
c165541e 3835 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 3836 struct device *dev = &adapter->pdev->dev;
c165541e 3837 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3838 int status, i, filehdr_size, num_comp;
3839 const struct flash_comp *pflashcomp;
3840 bool crc_match;
3841 const u8 *p;
c165541e
PR
3842
3843 struct flash_comp gen3_flash_types[] = {
3844 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3845 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3846 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3847 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3848 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3849 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3850 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3851 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3852 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3853 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3854 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3855 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3856 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3857 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3858 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3859 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3860 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3861 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3862 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3863 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3864 };
c165541e
PR
3865
3866 struct flash_comp gen2_flash_types[] = {
3867 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3868 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3869 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3870 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3871 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3872 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3873 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3874 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3875 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3876 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3877 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3878 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3879 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3880 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3881 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3882 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3883 };
3884
ca34fe38 3885 if (BE3_chip(adapter)) {
3f0d4560
AK
3886 pflashcomp = gen3_flash_types;
3887 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3888 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3889 } else {
3890 pflashcomp = gen2_flash_types;
3891 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3892 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3893 }
ca34fe38 3894
c165541e
PR
3895 /* Get flash section info */
3896 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3897 if (!fsec) {
96c9b2e4 3898 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
3899 return -1;
3900 }
9fe96934 3901 for (i = 0; i < num_comp; i++) {
c165541e 3902 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3903 continue;
c165541e
PR
3904
3905 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3906 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3907 continue;
3908
773a2d7c
PR
3909 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3910 !phy_flashing_required(adapter))
306f1348 3911 continue;
c165541e 3912
773a2d7c 3913 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
3914 status = be_check_flash_crc(adapter, fw->data,
3915 pflashcomp[i].offset,
3916 pflashcomp[i].size,
3917 filehdr_size +
3918 img_hdrs_size,
3919 OPTYPE_REDBOOT, &crc_match);
3920 if (status) {
3921 dev_err(dev,
3922 "Could not get CRC for 0x%x region\n",
3923 pflashcomp[i].optype);
3924 continue;
3925 }
3926
3927 if (crc_match)
773a2d7c
PR
3928 continue;
3929 }
c165541e 3930
96c9b2e4
VV
3931 p = fw->data + filehdr_size + pflashcomp[i].offset +
3932 img_hdrs_size;
306f1348
SP
3933 if (p + pflashcomp[i].size > fw->data + fw->size)
3934 return -1;
773a2d7c
PR
3935
3936 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
748b539a 3937 pflashcomp[i].size);
773a2d7c 3938 if (status) {
96c9b2e4 3939 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
3940 pflashcomp[i].img_type);
3941 return status;
84517482 3942 }
84517482 3943 }
84517482
AK
3944 return 0;
3945}
3946
96c9b2e4
VV
3947static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3948{
3949 u32 img_type = le32_to_cpu(fsec_entry.type);
3950 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3951
3952 if (img_optype != 0xFFFF)
3953 return img_optype;
3954
3955 switch (img_type) {
3956 case IMAGE_FIRMWARE_iSCSI:
3957 img_optype = OPTYPE_ISCSI_ACTIVE;
3958 break;
3959 case IMAGE_BOOT_CODE:
3960 img_optype = OPTYPE_REDBOOT;
3961 break;
3962 case IMAGE_OPTION_ROM_ISCSI:
3963 img_optype = OPTYPE_BIOS;
3964 break;
3965 case IMAGE_OPTION_ROM_PXE:
3966 img_optype = OPTYPE_PXE_BIOS;
3967 break;
3968 case IMAGE_OPTION_ROM_FCoE:
3969 img_optype = OPTYPE_FCOE_BIOS;
3970 break;
3971 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3972 img_optype = OPTYPE_ISCSI_BACKUP;
3973 break;
3974 case IMAGE_NCSI:
3975 img_optype = OPTYPE_NCSI_FW;
3976 break;
3977 case IMAGE_FLASHISM_JUMPVECTOR:
3978 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3979 break;
3980 case IMAGE_FIRMWARE_PHY:
3981 img_optype = OPTYPE_SH_PHY_FW;
3982 break;
3983 case IMAGE_REDBOOT_DIR:
3984 img_optype = OPTYPE_REDBOOT_DIR;
3985 break;
3986 case IMAGE_REDBOOT_CONFIG:
3987 img_optype = OPTYPE_REDBOOT_CONFIG;
3988 break;
3989 case IMAGE_UFI_DIR:
3990 img_optype = OPTYPE_UFI_DIR;
3991 break;
3992 default:
3993 break;
3994 }
3995
3996 return img_optype;
3997}
3998
773a2d7c 3999static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4000 const struct firmware *fw,
4001 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4002{
773a2d7c 4003 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
96c9b2e4 4004 struct device *dev = &adapter->pdev->dev;
773a2d7c 4005 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4006 u32 img_offset, img_size, img_type;
4007 int status, i, filehdr_size;
4008 bool crc_match, old_fw_img;
4009 u16 img_optype;
4010 const u8 *p;
773a2d7c
PR
4011
4012 filehdr_size = sizeof(struct flash_file_hdr_g3);
4013 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4014 if (!fsec) {
96c9b2e4 4015 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4016 return -EINVAL;
773a2d7c
PR
4017 }
4018
4019 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4020 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4021 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4022 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4023 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4024 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4025
96c9b2e4 4026 if (img_optype == 0xFFFF)
773a2d7c 4027 continue;
96c9b2e4
VV
4028 /* Don't bother verifying CRC if an old FW image is being
4029 * flashed
4030 */
4031 if (old_fw_img)
4032 goto flash;
4033
4034 status = be_check_flash_crc(adapter, fw->data, img_offset,
4035 img_size, filehdr_size +
4036 img_hdrs_size, img_optype,
4037 &crc_match);
4038 /* The current FW image on the card does not recognize the new
4039 * FLASH op_type. The FW download is partially complete.
4040 * Reboot the server now to enable FW image to recognize the
4041 * new FLASH op_type. To complete the remaining process,
4042 * download the same FW again after the reboot.
4043 */
4c60005f
KA
4044 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4045 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
96c9b2e4
VV
4046 dev_err(dev, "Flash incomplete. Reset the server\n");
4047 dev_err(dev, "Download FW image again after reset\n");
4048 return -EAGAIN;
4049 } else if (status) {
4050 dev_err(dev, "Could not get CRC for 0x%x region\n",
4051 img_optype);
4052 return -EFAULT;
773a2d7c
PR
4053 }
4054
96c9b2e4
VV
4055 if (crc_match)
4056 continue;
773a2d7c 4057
96c9b2e4
VV
4058flash:
4059 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4060 if (p + img_size > fw->data + fw->size)
4061 return -1;
4062
4063 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
96c9b2e4
VV
4064 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4065 * UFI_DIR region
4066 */
4c60005f
KA
4067 if (old_fw_img &&
4068 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4069 (img_optype == OPTYPE_UFI_DIR &&
4070 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4071 continue;
4072 } else if (status) {
4073 dev_err(dev, "Flashing section type 0x%x failed\n",
4074 img_type);
4075 return -EFAULT;
773a2d7c
PR
4076 }
4077 }
4078 return 0;
3f0d4560
AK
4079}
4080
485bf569 4081static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4082 const struct firmware *fw)
84517482 4083{
485bf569
SN
4084#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4085#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4086 struct device *dev = &adapter->pdev->dev;
84517482 4087 struct be_dma_mem flash_cmd;
485bf569
SN
4088 const u8 *data_ptr = NULL;
4089 u8 *dest_image_ptr = NULL;
4090 size_t image_size = 0;
4091 u32 chunk_size = 0;
4092 u32 data_written = 0;
4093 u32 offset = 0;
4094 int status = 0;
4095 u8 add_status = 0;
f67ef7ba 4096 u8 change_status;
84517482 4097
485bf569 4098 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4099 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4100 return -EINVAL;
d9efd2af
SB
4101 }
4102
485bf569
SN
4103 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4104 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4105 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4106 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4107 if (!flash_cmd.va)
4108 return -ENOMEM;
84517482 4109
485bf569
SN
4110 dest_image_ptr = flash_cmd.va +
4111 sizeof(struct lancer_cmd_req_write_object);
4112 image_size = fw->size;
4113 data_ptr = fw->data;
4114
4115 while (image_size) {
4116 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4117
4118 /* Copy the image chunk content. */
4119 memcpy(dest_image_ptr, data_ptr, chunk_size);
4120
4121 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4122 chunk_size, offset,
4123 LANCER_FW_DOWNLOAD_LOCATION,
4124 &data_written, &change_status,
4125 &add_status);
485bf569
SN
4126 if (status)
4127 break;
4128
4129 offset += data_written;
4130 data_ptr += data_written;
4131 image_size -= data_written;
4132 }
4133
4134 if (!status) {
4135 /* Commit the FW that was written */
4136 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4137 0, offset,
4138 LANCER_FW_DOWNLOAD_LOCATION,
4139 &data_written, &change_status,
4140 &add_status);
485bf569
SN
4141 }
4142
bb864e07 4143 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4144 if (status) {
bb864e07 4145 dev_err(dev, "Firmware load error\n");
3fb8cb80 4146 return be_cmd_status(status);
485bf569
SN
4147 }
4148
bb864e07
KA
4149 dev_info(dev, "Firmware flashed successfully\n");
4150
f67ef7ba 4151 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4152 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4153 status = lancer_physdev_ctrl(adapter,
4154 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4155 if (status) {
bb864e07
KA
4156 dev_err(dev, "Adapter busy, could not reset FW\n");
4157 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4158 }
4159 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4160 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4161 }
3fb8cb80
KA
4162
4163 return 0;
485bf569
SN
4164}
4165
ca34fe38
SP
4166#define UFI_TYPE2 2
4167#define UFI_TYPE3 3
0ad3157e 4168#define UFI_TYPE3R 10
ca34fe38
SP
4169#define UFI_TYPE4 4
4170static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4171 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4172{
ddf1169f 4173 if (!fhdr)
773a2d7c
PR
4174 goto be_get_ufi_exit;
4175
ca34fe38
SP
4176 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4177 return UFI_TYPE4;
0ad3157e
VV
4178 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4179 if (fhdr->asic_type_rev == 0x10)
4180 return UFI_TYPE3R;
4181 else
4182 return UFI_TYPE3;
4183 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4184 return UFI_TYPE2;
773a2d7c
PR
4185
4186be_get_ufi_exit:
4187 dev_err(&adapter->pdev->dev,
4188 "UFI and Interface are not compatible for flashing\n");
4189 return -1;
4190}
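/* Quick reference, derived from the checks above:
 *	Skyhawk + build '4'            -> UFI_TYPE4
 *	BE3 + build '3', asic rev 0x10 -> UFI_TYPE3R
 *	BE3 + build '3', older rev     -> UFI_TYPE3
 *	BE2 + build '2'                -> UFI_TYPE2
 * Any other combination is rejected as incompatible.
 */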
4191
485bf569
SN
4192static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4193{
485bf569
SN
4194 struct flash_file_hdr_g3 *fhdr3;
4195 struct image_hdr *img_hdr_ptr = NULL;
4196 struct be_dma_mem flash_cmd;
4197 const u8 *p;
773a2d7c 4198 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4199
be716446 4200 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
4201 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4202 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
4203 if (!flash_cmd.va) {
4204 status = -ENOMEM;
485bf569 4205 goto be_fw_exit;
84517482
AK
4206 }
4207
773a2d7c 4208 p = fw->data;
0ad3157e 4209 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4210
0ad3157e 4211 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4212
773a2d7c
PR
4213 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4214 for (i = 0; i < num_imgs; i++) {
4215 img_hdr_ptr = (struct image_hdr *)(fw->data +
4216 (sizeof(struct flash_file_hdr_g3) +
4217 i * sizeof(struct image_hdr)));
4218 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
4219 switch (ufi_type) {
4220 case UFI_TYPE4:
773a2d7c 4221 status = be_flash_skyhawk(adapter, fw,
748b539a 4222 &flash_cmd, num_imgs);
0ad3157e
VV
4223 break;
4224 case UFI_TYPE3R:
ca34fe38
SP
4225 status = be_flash_BEx(adapter, fw, &flash_cmd,
4226 num_imgs);
0ad3157e
VV
4227 break;
4228 case UFI_TYPE3:
4229 /* Do not flash this ufi on BE3-R cards */
4230 if (adapter->asic_rev < 0x10)
4231 status = be_flash_BEx(adapter, fw,
4232 &flash_cmd,
4233 num_imgs);
4234 else {
56ace3a0 4235 status = -EINVAL;
0ad3157e
VV
4236 dev_err(&adapter->pdev->dev,
4237 "Can't load BE3 UFI on BE3R\n");
4238 }
4239 }
3f0d4560 4240 }
773a2d7c
PR
4241 }
4242
ca34fe38
SP
4243 if (ufi_type == UFI_TYPE2)
4244 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4245 else if (ufi_type == -1)
56ace3a0 4246 status = -EINVAL;
84517482 4247
2b7bcebf
IV
4248 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4249 flash_cmd.dma);
84517482
AK
4250 if (status) {
4251 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4252 goto be_fw_exit;
84517482
AK
4253 }
4254
af901ca1 4255 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4256
485bf569
SN
4257be_fw_exit:
4258 return status;
4259}
4260
4261int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4262{
4263 const struct firmware *fw;
4264 int status;
4265
4266 if (!netif_running(adapter->netdev)) {
4267 dev_err(&adapter->pdev->dev,
4268 "Firmware load not allowed (interface is down)\n");
940a3fcd 4269 return -ENETDOWN;
485bf569
SN
4270 }
4271
4272 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4273 if (status)
4274 goto fw_exit;
4275
4276 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4277
4278 if (lancer_chip(adapter))
4279 status = lancer_fw_download(adapter, fw);
4280 else
4281 status = be_fw_download(adapter, fw);
4282
eeb65ced 4283 if (!status)
e97e3cda 4284 be_cmd_get_fw_ver(adapter);
eeb65ced 4285
84517482
AK
4286fw_exit:
4287 release_firmware(fw);
4288 return status;
4289}
4290
748b539a 4291static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4292{
4293 struct be_adapter *adapter = netdev_priv(dev);
4294 struct nlattr *attr, *br_spec;
4295 int rem;
4296 int status = 0;
4297 u16 mode = 0;
4298
4299 if (!sriov_enabled(adapter))
4300 return -EOPNOTSUPP;
4301
4302 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4303
4304 nla_for_each_nested(attr, br_spec, rem) {
4305 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4306 continue;
4307
4308 mode = nla_get_u16(attr);
4309 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4310 return -EINVAL;
4311
4312 status = be_cmd_set_hsw_config(adapter, 0, 0,
4313 adapter->if_handle,
4314 mode == BRIDGE_MODE_VEPA ?
4315 PORT_FWD_TYPE_VEPA :
4316 PORT_FWD_TYPE_VEB);
4317 if (status)
4318 goto err;
4319
4320 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4321 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4322
4323 return status;
4324 }
4325err:
4326 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4327 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4328
4329 return status;
4330}
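/* Usage sketch (assuming iproute2's bridge(8) hwmode support): this ndo is
 * typically driven from userspace with, e.g.:
 *	bridge link set dev eth0 hwmode vepa
 */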
4331
4332static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4333 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4334{
4335 struct be_adapter *adapter = netdev_priv(dev);
4336 int status = 0;
4337 u8 hsw_mode;
4338
4339 if (!sriov_enabled(adapter))
4340 return 0;
4341
4342 /* BE and Lancer chips support VEB mode only */
4343 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4344 hsw_mode = PORT_FWD_TYPE_VEB;
4345 } else {
4346 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4347 adapter->if_handle, &hsw_mode);
4348 if (status)
4349 return 0;
4350 }
4351
4352 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4353 hsw_mode == PORT_FWD_TYPE_VEPA ?
4354 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4355}
4356
c5abe7c0 4357#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4358static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4359 __be16 port)
4360{
4361 struct be_adapter *adapter = netdev_priv(netdev);
4362 struct device *dev = &adapter->pdev->dev;
4363 int status;
4364
4365 if (lancer_chip(adapter) || BEx_chip(adapter))
4366 return;
4367
4368 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4369 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4370 be16_to_cpu(port));
4371 dev_info(dev,
4372 "Only one UDP port supported for VxLAN offloads\n");
4373 return;
4374 }
4375
4376 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4377 OP_CONVERT_NORMAL_TO_TUNNEL);
4378 if (status) {
4379 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4380 goto err;
4381 }
4382
4383 status = be_cmd_set_vxlan_port(adapter, port);
4384 if (status) {
4385 dev_warn(dev, "Failed to add VxLAN port\n");
4386 goto err;
4387 }
4388 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4389 adapter->vxlan_port = port;
4390
4391 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4392 be16_to_cpu(port));
4393 return;
4394err:
4395 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4396}
4397
4398static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4399 __be16 port)
4400{
4401 struct be_adapter *adapter = netdev_priv(netdev);
4402
4403 if (lancer_chip(adapter) || BEx_chip(adapter))
4404 return;
4405
4406 if (adapter->vxlan_port != port)
4407 return;
4408
4409 be_disable_vxlan_offloads(adapter);
4410
4411 dev_info(&adapter->pdev->dev,
4412 "Disabled VxLAN offloads for UDP port %d\n",
4413 be16_to_cpu(port));
4414}
c5abe7c0 4415#endif
c9c47142 4416
e5686ad8 4417static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4418 .ndo_open = be_open,
4419 .ndo_stop = be_close,
4420 .ndo_start_xmit = be_xmit,
a54769f5 4421 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4422 .ndo_set_mac_address = be_mac_addr_set,
4423 .ndo_change_mtu = be_change_mtu,
ab1594e9 4424 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4425 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4426 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4427 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4428 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4429 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4430 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4431 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4432 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4433#ifdef CONFIG_NET_POLL_CONTROLLER
4434 .ndo_poll_controller = be_netpoll,
4435#endif
a77dcb8c
AK
4436 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4437 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4438#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4439 .ndo_busy_poll = be_busy_poll,
6384a4d0 4440#endif
c5abe7c0 4441#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4442 .ndo_add_vxlan_port = be_add_vxlan_port,
4443 .ndo_del_vxlan_port = be_del_vxlan_port,
c5abe7c0 4444#endif
6b7c5b94
SP
4445};
4446
4447static void be_netdev_init(struct net_device *netdev)
4448{
4449 struct be_adapter *adapter = netdev_priv(netdev);
4450
c9c47142
SP
4451 if (skyhawk_chip(adapter)) {
4452 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4453 NETIF_F_TSO | NETIF_F_TSO6 |
4454 NETIF_F_GSO_UDP_TUNNEL;
4455 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4456 }
6332c8d3 4457 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4458 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4459 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4460 if (be_multi_rxq(adapter))
4461 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4462
4463 netdev->features |= netdev->hw_features |
f646968f 4464 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4465
eb8a50d9 4466 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4467 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4468
fbc13f01
AK
4469 netdev->priv_flags |= IFF_UNICAST_FLT;
4470
6b7c5b94
SP
4471 netdev->flags |= IFF_MULTICAST;
4472
b7e5887e 4473 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4474
10ef9ab4 4475 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4476
7ad24ea4 4477 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4478}
4479
4480static void be_unmap_pci_bars(struct be_adapter *adapter)
4481{
c5b3ad4c
SP
4482 if (adapter->csr)
4483 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4484 if (adapter->db)
ce66f781 4485 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4486}
4487
ce66f781
SP
4488static int db_bar(struct be_adapter *adapter)
4489{
4490 if (lancer_chip(adapter) || !be_physfn(adapter))
4491 return 0;
4492 else
4493 return 4;
4494}
4495
4496static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4497{
dbf0f2a7 4498 if (skyhawk_chip(adapter)) {
ce66f781
SP
4499 adapter->roce_db.size = 4096;
4500 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4501 db_bar(adapter));
4502 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4503 db_bar(adapter));
4504 }
045508a8 4505 return 0;
6b7c5b94
SP
4506}
4507
4508static int be_map_pci_bars(struct be_adapter *adapter)
4509{
4510 u8 __iomem *addr;
fe6d2a38 4511
c5b3ad4c
SP
4512 if (BEx_chip(adapter) && be_physfn(adapter)) {
4513 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4514 if (!adapter->csr)
c5b3ad4c
SP
4515 return -ENOMEM;
4516 }
4517
ce66f781 4518 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4519 if (!addr)
6b7c5b94 4520 goto pci_map_err;
ba343c77 4521 adapter->db = addr;
ce66f781
SP
4522
4523 be_roce_map_pci_bars(adapter);
6b7c5b94 4524 return 0;
ce66f781 4525
6b7c5b94 4526pci_map_err:
acbafeb1 4527 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
4528 be_unmap_pci_bars(adapter);
4529 return -ENOMEM;
4530}
4531
6b7c5b94
SP
4532static void be_ctrl_cleanup(struct be_adapter *adapter)
4533{
8788fdc2 4534 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4535
4536 be_unmap_pci_bars(adapter);
4537
4538 if (mem->va)
2b7bcebf
IV
4539 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4540 mem->dma);
e7b909a6 4541
5b8821b7 4542 mem = &adapter->rx_filter;
e7b909a6 4543 if (mem->va)
2b7bcebf
IV
4544 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4545 mem->dma);
6b7c5b94
SP
4546}
4547
6b7c5b94
SP
4548static int be_ctrl_init(struct be_adapter *adapter)
4549{
8788fdc2
SP
4550 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4551 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4552 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4553 u32 sli_intf;
6b7c5b94 4554 int status;
6b7c5b94 4555
ce66f781
SP
4556 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4557 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4558 SLI_INTF_FAMILY_SHIFT;
4559 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4560
6b7c5b94
SP
4561 status = be_map_pci_bars(adapter);
4562 if (status)
e7b909a6 4563 goto done;
6b7c5b94
SP
4564
4565 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4566 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4567 mbox_mem_alloc->size,
4568 &mbox_mem_alloc->dma,
4569 GFP_KERNEL);
6b7c5b94 4570 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4571 status = -ENOMEM;
4572 goto unmap_pci_bars;
6b7c5b94
SP
4573 }
4574 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4575 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4576 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4577 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4578
5b8821b7 4579 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4580 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4581 rx_filter->size, &rx_filter->dma,
4582 GFP_KERNEL);
ddf1169f 4583 if (!rx_filter->va) {
e7b909a6
SP
4584 status = -ENOMEM;
4585 goto free_mbox;
4586 }
1f9061d2 4587
2984961c 4588 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4589 spin_lock_init(&adapter->mcc_lock);
4590 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4591
5eeff635 4592 init_completion(&adapter->et_cmd_compl);
cf588477 4593 pci_save_state(adapter->pdev);
6b7c5b94 4594 return 0;
e7b909a6
SP
4595
4596free_mbox:
2b7bcebf
IV
4597 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4598 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4599
4600unmap_pci_bars:
4601 be_unmap_pci_bars(adapter);
4602
4603done:
4604 return status;
6b7c5b94
SP
4605}
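/* Worked example: the mailbox must be 16-byte aligned, hence the extra 16
 * bytes allocated above. PTR_ALIGN() rounds up, e.g. a raw address of
 * 0x1008 becomes 0x1010, still leaving sizeof(struct be_mcc_mailbox)
 * usable bytes within the allocation.
 */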
4606
4607static void be_stats_cleanup(struct be_adapter *adapter)
4608{
3abcdeda 4609 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4610
4611 if (cmd->va)
2b7bcebf
IV
4612 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4613 cmd->va, cmd->dma);
6b7c5b94
SP
4614}
4615
4616static int be_stats_init(struct be_adapter *adapter)
4617{
3abcdeda 4618 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4619
ca34fe38
SP
4620 if (lancer_chip(adapter))
4621 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4622 else if (BE2_chip(adapter))
89a88ab8 4623 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4624 else if (BE3_chip(adapter))
ca34fe38 4625 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4626 else
4627 /* ALL non-BE ASICs */
4628 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4629
ede23fa8
JP
4630 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4631 GFP_KERNEL);
ddf1169f 4632 if (!cmd->va)
6b568689 4633 return -ENOMEM;
6b7c5b94
SP
4634 return 0;
4635}
4636
3bc6b06c 4637static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4638{
4639 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4640
6b7c5b94
SP
4641 if (!adapter)
4642 return;
4643
045508a8 4644 be_roce_dev_remove(adapter);
8cef7a78 4645 be_intr_set(adapter, false);
045508a8 4646
f67ef7ba
PR
4647 cancel_delayed_work_sync(&adapter->func_recovery_work);
4648
6b7c5b94
SP
4649 unregister_netdev(adapter->netdev);
4650
5fb379ee
SP
4651 be_clear(adapter);
4652
bf99e50d
PR
4653 /* tell fw we're done with firing cmds */
4654 be_cmd_fw_clean(adapter);
4655
6b7c5b94
SP
4656 be_stats_cleanup(adapter);
4657
4658 be_ctrl_cleanup(adapter);
4659
d6b6d987
SP
4660 pci_disable_pcie_error_reporting(pdev);
4661
6b7c5b94
SP
4662 pci_release_regions(pdev);
4663 pci_disable_device(pdev);
4664
4665 free_netdev(adapter->netdev);
4666}
4667
39f1d94d 4668static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4669{
baaa08d1 4670 int status, level;
6b7c5b94 4671
9e1453c5
AK
4672 status = be_cmd_get_cntl_attributes(adapter);
4673 if (status)
4674 return status;
4675
7aeb2156
PR
4676 /* Must be a power of 2 or else MODULO will BUG_ON */
4677 adapter->be_get_temp_freq = 64;
4678
baaa08d1
VV
4679 if (BEx_chip(adapter)) {
4680 level = be_cmd_get_fw_log_level(adapter);
4681 adapter->msg_enable =
4682 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4683 }
941a77d5 4684
92bf14ab 4685 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4686 return 0;
6b7c5b94
SP
4687}
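/* Worked example (assuming be.h's MODULO masks with (limit - 1) and
 * BUG_ONs on non-power-of-2 limits): with be_get_temp_freq = 64, the
 * worker's MODULO(work_counter, 64) is work_counter & 63, i.e. the die
 * temperature is read once every 64 ticks (~64s at the 1s worker period).
 */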
4688
f67ef7ba 4689static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4690{
01e5b2c4 4691 struct device *dev = &adapter->pdev->dev;
d8110f62 4692 int status;
d8110f62 4693
f67ef7ba
PR
4694 status = lancer_test_and_set_rdy_state(adapter);
4695 if (status)
4696 goto err;
d8110f62 4697
f67ef7ba
PR
4698 if (netif_running(adapter->netdev))
4699 be_close(adapter->netdev);
d8110f62 4700
f67ef7ba
PR
4701 be_clear(adapter);
4702
01e5b2c4 4703 be_clear_all_error(adapter);
f67ef7ba
PR
4704
4705 status = be_setup(adapter);
4706 if (status)
4707 goto err;
d8110f62 4708
f67ef7ba
PR
4709 if (netif_running(adapter->netdev)) {
4710 status = be_open(adapter->netdev);
d8110f62
PR
4711 if (status)
4712 goto err;
f67ef7ba 4713 }
d8110f62 4714
4bebb56a 4715 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4716 return 0;
4717err:
01e5b2c4
SK
4718 if (status == -EAGAIN)
4719 dev_err(dev, "Waiting for resource provisioning\n");
4720 else
4bebb56a 4721 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4722
f67ef7ba
PR
4723 return status;
4724}
4725
4726static void be_func_recovery_task(struct work_struct *work)
4727{
4728 struct be_adapter *adapter =
4729 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4730 int status = 0;
d8110f62 4731
f67ef7ba 4732 be_detect_error(adapter);
d8110f62 4733
f67ef7ba 4734 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4735
f67ef7ba
PR
4736 rtnl_lock();
4737 netif_device_detach(adapter->netdev);
4738 rtnl_unlock();
d8110f62 4739
f67ef7ba 4740 status = lancer_recover_func(adapter);
f67ef7ba
PR
4741 if (!status)
4742 netif_device_attach(adapter->netdev);
d8110f62 4743 }
f67ef7ba 4744
01e5b2c4
SK
4745 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4746 * no need to attempt further recovery.
4747 */
4748 if (!status || status == -EAGAIN)
4749 schedule_delayed_work(&adapter->func_recovery_work,
4750 msecs_to_jiffies(1000));
d8110f62
PR
4751}
4752
4753static void be_worker(struct work_struct *work)
4754{
4755 struct be_adapter *adapter =
4756 container_of(work, struct be_adapter, work.work);
4757 struct be_rx_obj *rxo;
4758 int i;
4759
d8110f62
PR
4760 /* When interrupts are not yet enabled, just reap any pending
4761 * MCC completions */
4762 if (!netif_running(adapter->netdev)) {
072a9c48 4763 local_bh_disable();
10ef9ab4 4764 be_process_mcc(adapter);
072a9c48 4765 local_bh_enable();
d8110f62
PR
4766 goto reschedule;
4767 }
4768
4769 if (!adapter->stats_cmd_sent) {
4770 if (lancer_chip(adapter))
4771 lancer_cmd_get_pport_stats(adapter,
4772 &adapter->stats_cmd);
4773 else
4774 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4775 }
4776
d696b5e2
VV
4777 if (be_physfn(adapter) &&
4778 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4779 be_cmd_get_die_temperature(adapter);
4780
d8110f62 4781 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4782 /* Replenish RX-queues starved due to memory
4783 * allocation failures.
4784 */
4785 if (rxo->rx_post_starved)
c30d7266 4786 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
d8110f62
PR
4787 }
4788
2632bafd 4789 be_eqd_update(adapter);
10ef9ab4 4790
d8110f62
PR
4791reschedule:
4792 adapter->work_counter++;
4793 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4794}
4795
257a3feb 4796/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4797static bool be_reset_required(struct be_adapter *adapter)
4798{
257a3feb 4799 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4800}
4801
d379142b
SP
4802static char *mc_name(struct be_adapter *adapter)
4803{
f93f160b
VV
4804 char *str = ""; /* default */
4805
4806 switch (adapter->mc_type) {
4807 case UMC:
4808 str = "UMC";
4809 break;
4810 case FLEX10:
4811 str = "FLEX10";
4812 break;
4813 case vNIC1:
4814 str = "vNIC-1";
4815 break;
4816 case nPAR:
4817 str = "nPAR";
4818 break;
4819 case UFP:
4820 str = "UFP";
4821 break;
4822 case vNIC2:
4823 str = "vNIC-2";
4824 break;
4825 default:
4826 str = "";
4827 }
4828
4829 return str;
d379142b
SP
4830}
4831
4832static inline char *func_name(struct be_adapter *adapter)
4833{
4834 return be_physfn(adapter) ? "PF" : "VF";
4835}
4836
1dd06ae8 4837static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4838{
4839 int status = 0;
4840 struct be_adapter *adapter;
4841 struct net_device *netdev;
b4e32a71 4842 char port_name;
6b7c5b94 4843
acbafeb1
SP
4844 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4845
6b7c5b94
SP
4846 status = pci_enable_device(pdev);
4847 if (status)
4848 goto do_none;
4849
4850 status = pci_request_regions(pdev, DRV_NAME);
4851 if (status)
4852 goto disable_dev;
4853 pci_set_master(pdev);
4854
7f640062 4855 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4856 if (!netdev) {
6b7c5b94
SP
4857 status = -ENOMEM;
4858 goto rel_reg;
4859 }
4860 adapter = netdev_priv(netdev);
4861 adapter->pdev = pdev;
4862 pci_set_drvdata(pdev, adapter);
4863 adapter->netdev = netdev;
2243e2e9 4864 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4865
4c15c243 4866 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4867 if (!status) {
4868 netdev->features |= NETIF_F_HIGHDMA;
4869 } else {
4c15c243 4870 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4871 if (status) {
4872 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4873 goto free_netdev;
4874 }
4875 }
4876
2f951a9a
KA
4877 status = pci_enable_pcie_error_reporting(pdev);
4878 if (!status)
4879 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 4880
6b7c5b94
SP
4881 status = be_ctrl_init(adapter);
4882 if (status)
39f1d94d 4883 goto free_netdev;
6b7c5b94 4884
2243e2e9 4885 /* sync up with fw's ready state */
ba343c77 4886 if (be_physfn(adapter)) {
bf99e50d 4887 status = be_fw_wait_ready(adapter);
ba343c77
SB
4888 if (status)
4889 goto ctrl_clean;
ba343c77 4890 }
6b7c5b94 4891
39f1d94d
SP
4892 if (be_reset_required(adapter)) {
4893 status = be_cmd_reset_function(adapter);
4894 if (status)
4895 goto ctrl_clean;
556ae191 4896
2d177be8
KA
4897 /* Wait for interrupts to quiesce after an FLR */
4898 msleep(100);
4899 }
8cef7a78
SK
4900
4901 /* Allow interrupts for other ULPs running on NIC function */
4902 be_intr_set(adapter, true);
10ef9ab4 4903
2d177be8
KA
4904 /* tell fw we're ready to fire cmds */
4905 status = be_cmd_fw_init(adapter);
4906 if (status)
4907 goto ctrl_clean;
4908
2243e2e9
SP
4909 status = be_stats_init(adapter);
4910 if (status)
4911 goto ctrl_clean;
4912
39f1d94d 4913 status = be_get_initial_config(adapter);
6b7c5b94
SP
4914 if (status)
4915 goto stats_clean;
6b7c5b94
SP
4916
4917 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4918 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4919 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4920
5fb379ee
SP
4921 status = be_setup(adapter);
4922 if (status)
55f5c3c5 4923 goto stats_clean;
2243e2e9 4924
3abcdeda 4925 be_netdev_init(netdev);
6b7c5b94
SP
4926 status = register_netdev(netdev);
4927 if (status != 0)
5fb379ee 4928 goto unsetup;
6b7c5b94 4929
045508a8
PP
4930 be_roce_dev_add(adapter);
4931
f67ef7ba
PR
4932 schedule_delayed_work(&adapter->func_recovery_work,
4933 msecs_to_jiffies(1000));
b4e32a71
PR
4934
4935 be_cmd_query_port_name(adapter, &port_name);
4936
d379142b
SP
4937 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4938 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4939
6b7c5b94
SP
4940 return 0;
4941
5fb379ee
SP
4942unsetup:
4943 be_clear(adapter);
6b7c5b94
SP
4944stats_clean:
4945 be_stats_cleanup(adapter);
4946ctrl_clean:
4947 be_ctrl_cleanup(adapter);
f9449ab7 4948free_netdev:
fe6d2a38 4949 free_netdev(netdev);
6b7c5b94
SP
4950rel_reg:
4951 pci_release_regions(pdev);
4952disable_dev:
4953 pci_disable_device(pdev);
4954do_none:
c4ca2374 4955 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4956 return status;
4957}
4958
4959static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4960{
4961 struct be_adapter *adapter = pci_get_drvdata(pdev);
4962 struct net_device *netdev = adapter->netdev;
4963
76a9e08e 4964 if (adapter->wol_en)
71d8d1b5
AK
4965 be_setup_wol(adapter, true);
4966
d4360d6f 4967 be_intr_set(adapter, false);
f67ef7ba
PR
4968 cancel_delayed_work_sync(&adapter->func_recovery_work);
4969
6b7c5b94
SP
4970 netif_device_detach(netdev);
4971 if (netif_running(netdev)) {
4972 rtnl_lock();
4973 be_close(netdev);
4974 rtnl_unlock();
4975 }
9b0365f1 4976 be_clear(adapter);
6b7c5b94
SP
4977
4978 pci_save_state(pdev);
4979 pci_disable_device(pdev);
4980 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4981 return 0;
4982}
4983
4984static int be_resume(struct pci_dev *pdev)
4985{
4986 int status = 0;
4987 struct be_adapter *adapter = pci_get_drvdata(pdev);
4988 struct net_device *netdev = adapter->netdev;
4989
4990 netif_device_detach(netdev);
4991
4992 status = pci_enable_device(pdev);
4993 if (status)
4994 return status;
4995
1ca01512 4996 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4997 pci_restore_state(pdev);
4998
dd5746bf
SB
4999 status = be_fw_wait_ready(adapter);
5000 if (status)
5001 return status;
5002
d4360d6f 5003 be_intr_set(adapter, true);
2243e2e9
SP
5004 /* tell fw we're ready to fire cmds */
5005 status = be_cmd_fw_init(adapter);
5006 if (status)
5007 return status;
5008
9b0365f1 5009 be_setup(adapter);
6b7c5b94
SP
5010 if (netif_running(netdev)) {
5011 rtnl_lock();
5012 be_open(netdev);
5013 rtnl_unlock();
5014 }
f67ef7ba
PR
5015
5016 schedule_delayed_work(&adapter->func_recovery_work,
5017 msecs_to_jiffies(1000));
6b7c5b94 5018 netif_device_attach(netdev);
71d8d1b5 5019
76a9e08e 5020 if (adapter->wol_en)
71d8d1b5 5021 be_setup_wol(adapter, false);
a4ca055f 5022
6b7c5b94
SP
5023 return 0;
5024}
5025
82456b03
SP
5026/*
5027 * An FLR will stop BE from DMAing any data.
5028 */
5029static void be_shutdown(struct pci_dev *pdev)
5030{
5031 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5032
2d5d4154
AK
5033 if (!adapter)
5034 return;
82456b03 5035
d114f99a 5036 be_roce_dev_shutdown(adapter);
0f4a6828 5037 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5038 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5039
2d5d4154 5040 netif_device_detach(adapter->netdev);
82456b03 5041
57841869
AK
5042 be_cmd_reset_function(adapter);
5043
82456b03 5044 pci_disable_device(pdev);
82456b03
SP
5045}
5046
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

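/* pci_cleanup_aer_uncorrect_error_status() above clears the device's
 * AER uncorrectable-error status registers so that a later error is
 * reported afresh rather than being masked by stale status bits.
 */
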
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

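/* The PCI core drives EEH recovery through these callbacks in order:
 * .error_detected runs first and, by returning
 * PCI_ERS_RESULT_NEED_RESET, asks the core to reset the slot;
 * .slot_reset then re-initializes the device and reports
 * PCI_ERS_RESULT_RECOVERED (or _DISCONNECT on failure); finally
 * .resume rebuilds driver state and restarts traffic.
 */
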
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
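
/* Example usage (hypothetical values, assuming the module loads under
 * its usual be2net name): the rx_frag_size check above rejects any
 * size other than 2048/4096/8192 and falls back to 2048:
 *
 *   modprobe be2net rx_frag_size=4096    # accepted
 *   modprobe be2net rx_frag_size=3000    # warns, uses 2048
 */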