bonding: destroy proc directory only after all bonds are gone
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
6b7c5b94 42static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one description per bit of the unrecoverable-error
 * low status register, indexed by bit position.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR: bit descriptions for the high status register */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 124
752961a1 125
6b7c5b94
SP
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 129 if (mem->va) {
2b7bcebf
IV
130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
1cfafab9
SP
132 mem->va = NULL;
133 }
6b7c5b94
SP
134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 137 u16 len, u16 entry_size)
6b7c5b94
SP
138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
ede23fa8
JP
145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
6b7c5b94 147 if (!mem->va)
10ef9ab4 148 return -ENOMEM;
6b7c5b94
SP
149 return 0;
150}
151
68c45a2d 152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 153{
db3ea781 154 u32 reg, enabled;
5f0b849e 155
db3ea781 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 157 &reg);
db3ea781
SP
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
5f0b849e 160 if (!enabled && enable)
6b7c5b94 161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 162 else if (enabled && !enable)
6b7c5b94 163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 164 else
6b7c5b94 165 return;
5f0b849e 166
db3ea781 167 pci_write_config_dword(adapter->pdev,
748b539a 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
169}
170
68c45a2d
SK
171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
8788fdc2 187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
192
193 wmb();
8788fdc2 194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
195}
196
94d73aaa
VV
197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
6b7c5b94
SP
199{
200 u32 val = 0;
94d73aaa 201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
203
204 wmb();
94d73aaa 205 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
206}
207
8788fdc2 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
748b539a 209 bool arm, bool clear_int, u16 num_popped)
6b7c5b94
SP
210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 214
f67ef7ba 215 if (adapter->eeh_error)
cf588477
SP
216 return;
217
6b7c5b94
SP
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
225}
226
8788fdc2 227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 233
f67ef7ba 234 if (adapter->eeh_error)
cf588477
SP
235 return;
236
6b7c5b94
SP
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
241}
242
6b7c5b94
SP
243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 246 struct device *dev = &adapter->pdev->dev;
6b7c5b94 247 struct sockaddr *addr = p;
5a712c13
SP
248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 251
ca9e4988
AK
252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
ff32f8ab
VV
255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
5a712c13
SP
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
704e4c88 266 */
5a712c13
SP
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
704e4c88
PR
278 }
279
5a712c13
SP
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
704e4c88 282 */
b188f090
SR
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
a65027e4 285 if (status)
e3a7ae2c 286 goto err;
6b7c5b94 287
5a712c13
SP
288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
61d23e9f 291 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
292 status = -EPERM;
293 goto err;
294 }
295
e3a7ae2c 296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 297 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
298 return 0;
299err:
5a712c13 300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
301 return status;
302}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 341{
ac124ff9
SP
342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 345 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 348
ac124ff9 349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
89a88ab8
AK
370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
ac124ff9 377 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 378 else
ac124ff9 379 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
ca34fe38 389static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 390{
ac124ff9
SP
391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 394 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 397
ac124ff9 398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
ac124ff9 421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
61000861
AK
435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 479 if (be_roce_supported(adapter)) {
461ae379
AK
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
61000861
AK
487}
488
005d5696
SX
489static void populate_lancer_stats(struct be_adapter *adapter)
490{
89a88ab8 491
005d5696 492 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
ac124ff9 516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 520 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 523 drvs->rx_drops_too_many_frags =
ac124ff9 524 pport_stats->rx_drops_too_many_frags_lo;
005d5696 525}
89a88ab8 526
09c1c68f
SP
527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
4188e7df 539static void populate_erx_stats(struct be_adapter *adapter,
748b539a 540 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
89a88ab8
AK
552void be_parse_stats(struct be_adapter *adapter)
553{
61000861 554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
555 struct be_rx_obj *rxo;
556 int i;
a6c578ef 557 u32 erx_stat;
ac124ff9 558
ca34fe38
SP
559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
005d5696 561 } else {
ca34fe38
SP
562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
61000861
AK
564 else if (BE3_chip(adapter))
565 /* for BE3 */
ca34fe38 566 populate_be_v1_stats(adapter);
61000861
AK
567 else
568 populate_be_v2_stats(adapter);
d51ebd33 569
61000861 570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 571 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 574 }
09c1c68f 575 }
89a88ab8
AK
576}
577
ab1594e9 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 579 struct rtnl_link_stats64 *stats)
6b7c5b94 580{
ab1594e9 581 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 582 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 583 struct be_rx_obj *rxo;
3c8def97 584 struct be_tx_obj *txo;
ab1594e9
SP
585 u64 pkts, bytes;
586 unsigned int start;
3abcdeda 587 int i;
6b7c5b94 588
3abcdeda 589 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
57a7744e 592 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 595 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
601 }
602
3c8def97 603 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
57a7744e 606 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
57a7744e 609 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
3c8def97 612 }
6b7c5b94
SP
613
614 /* bad pkts received */
ab1594e9 615 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
ab1594e9 624 drvs->rx_dropped_runt;
68110868 625
6b7c5b94 626 /* detailed rx errors */
ab1594e9 627 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
68110868 630
ab1594e9 631 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
632
633 /* frame alignment errors */
ab1594e9 634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 635
6b7c5b94
SP
636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
ab1594e9 641 return stats;
6b7c5b94
SP
642}
643
b236916a 644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 645{
6b7c5b94
SP
646 struct net_device *netdev = adapter->netdev;
647
b236916a 648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 649 netif_carrier_off(netdev);
b236916a 650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 651 }
b236916a 652
bdce2ad7 653 if (link_status)
b236916a
AK
654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
6b7c5b94
SP
657}
658
3c8def97 659static void be_tx_stats_update(struct be_tx_obj *txo,
748b539a
SP
660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
6b7c5b94 662{
3c8def97
SP
663 struct be_tx_stats *stats = tx_stats(txo);
664
ab1594e9 665 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 670 if (stopped)
ac124ff9 671 stats->tx_stops++;
ab1594e9 672 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
748b539a 677 bool *dummy)
6b7c5b94 678{
ebc8d2ab
DM
679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
6b7c5b94
SP
683 /* to account for hdr wrb */
684 cnt++;
fe6d2a38
SP
685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
6b7c5b94
SP
688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
fe6d2a38 691 }
6b7c5b94
SP
692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 701 wrb->rsvd0 = 0;
6b7c5b94
SP
702}
703
1ded132d 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 705 struct sk_buff *skb)
1ded132d
AK
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
c9c47142
SP
720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
cc4ce020 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
748b539a
SP
734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
6b7c5b94 736{
c9c47142 737 u16 vlan_tag, proto;
cc4ce020 738
6b7c5b94
SP
739 memset(hdr, 0, sizeof(*hdr));
740
741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
742
49e4b847 743 if (skb_is_gso(skb)) {
6b7c5b94
SP
744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
746 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 747 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 748 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94 749 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142
SP
750 if (skb->encapsulation) {
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
752 proto = skb_inner_ip_proto(skb);
753 } else {
754 proto = skb_ip_proto(skb);
755 }
756 if (proto == IPPROTO_TCP)
6b7c5b94 757 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
c9c47142 758 else if (proto == IPPROTO_UDP)
6b7c5b94
SP
759 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
760 }
761
4c5102f9 762 if (vlan_tx_tag_present(skb)) {
6b7c5b94 763 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 764 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 765 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
766 }
767
bc0c3405
AK
768 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
769 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 770 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
771 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
772 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
773}
774
2b7bcebf 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 776 bool unmap_single)
7101e111
SP
777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 783 if (wrb->frag_len) {
7101e111 784 if (unmap_single)
2b7bcebf
IV
785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
7101e111 787 else
2b7bcebf 788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
789 }
790}
6b7c5b94 791
3c8def97 792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
748b539a
SP
793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
794 bool skip_hw_vlan)
6b7c5b94 795{
7101e111
SP
796 dma_addr_t busaddr;
797 int i, copied = 0;
2b7bcebf 798 struct device *dev = &adapter->pdev->dev;
6b7c5b94 799 struct sk_buff *first_skb = skb;
6b7c5b94
SP
800 struct be_eth_wrb *wrb;
801 struct be_eth_hdr_wrb *hdr;
7101e111
SP
802 bool map_single = false;
803 u16 map_head;
6b7c5b94 804
6b7c5b94
SP
805 hdr = queue_head_node(txq);
806 queue_head_inc(txq);
7101e111 807 map_head = txq->head;
6b7c5b94 808
ebc8d2ab 809 if (skb->len > skb->data_len) {
e743d313 810 int len = skb_headlen(skb);
2b7bcebf
IV
811 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
812 if (dma_mapping_error(dev, busaddr))
7101e111
SP
813 goto dma_err;
814 map_single = true;
ebc8d2ab
DM
815 wrb = queue_head_node(txq);
816 wrb_fill(wrb, busaddr, len);
817 be_dws_cpu_to_le(wrb, sizeof(*wrb));
818 queue_head_inc(txq);
819 copied += len;
820 }
6b7c5b94 821
ebc8d2ab 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
748b539a 823 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
b061b39e 824 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 825 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 826 if (dma_mapping_error(dev, busaddr))
7101e111 827 goto dma_err;
ebc8d2ab 828 wrb = queue_head_node(txq);
9e903e08 829 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
830 be_dws_cpu_to_le(wrb, sizeof(*wrb));
831 queue_head_inc(txq);
9e903e08 832 copied += skb_frag_size(frag);
6b7c5b94
SP
833 }
834
835 if (dummy_wrb) {
836 wrb = queue_head_node(txq);
837 wrb_fill(wrb, 0, 0);
838 be_dws_cpu_to_le(wrb, sizeof(*wrb));
839 queue_head_inc(txq);
840 }
841
bc0c3405 842 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
843 be_dws_cpu_to_le(hdr, sizeof(*hdr));
844
845 return copied;
7101e111
SP
846dma_err:
847 txq->head = map_head;
848 while (copied) {
849 wrb = queue_head_node(txq);
2b7bcebf 850 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
851 map_single = false;
852 copied -= wrb->frag_len;
853 queue_head_inc(txq);
854 }
855 return 0;
6b7c5b94
SP
856}
857
/* Manually insert a VLAN tag (and, in QnQ mode, the outer tag) into the
 * packet payload.  Used as a workaround when HW VLAN insertion must be
 * skipped; may reallocate the skb, so callers must use the returned
 * pointer.  Returns NULL if the skb could not be unshared/retagged.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* The payload is modified below; make sure we own the data */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* In QnQ mode an untagged frame must carry the port's pvid */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag now lives in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
bc0c3405
AK
901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
748b539a 928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 929{
ee9c799c 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
931}
932
/* Apply BEx/Lancer TX errata workarounds to an outgoing skb.
 * May trim padded IPv4 frames, manually insert VLAN tags, or drop the
 * packet entirely.  Returns the (possibly reallocated) skb, or NULL if
 * the packet was dropped/freed — callers must use the return value.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* Trim off the pad so HW/Lancer sees the true IP length */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* Packet dropped by policy: free it here, caller gets NULL */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
ec495fac
VV
1001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply TX workarounds, build WRBs on the TX
 * queue for this skb and ring the doorbell.  Always returns
 * NETDEV_TX_OK — failed packets are dropped and counted, never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* saved for rollback if WRB setup fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
748b539a 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94 1078 dev_info(&adapter->pdev->dev,
748b539a
SP
1079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
748b539a 1085 netdev->mtu, new_mtu);
6b7c5b94
SP
1086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* Too many vids for the HW filter table: fall back to promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* Filter programmed OK: leave VLAN-promisc if it was on */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN-promisc: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
80d5c368 1148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1151 int status = 0;
6b7c5b94 1152
a85e9986
PR
1153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1155 return status;
1156
f6cbd364 1157 if (test_bit(vid, adapter->vids))
48291c22 1158 return status;
a85e9986 1159
f6cbd364 1160 set_bit(vid, adapter->vids);
a6b74e01 1161 adapter->vlans_added++;
8e586137 1162
a6b74e01
SK
1163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
f6cbd364 1166 clear_bit(vid, adapter->vids);
a6b74e01 1167 }
48291c22 1168
80817cbf 1169 return status;
6b7c5b94
SP
1170}
1171
80d5c368 1172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
a85e9986
PR
1176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1178 return 0;
a85e9986 1179
f6cbd364 1180 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
6b7c5b94
SP
1184}
1185
/* Leave promiscuous mode: clear the SW state (including the vlan/mcast
 * promisc flags that ride on it) and tell the HW to stop IFF_PROMISC.
 */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
1193
/* ndo_set_rx_mode handler: sync HW RX filters (promisc, multicast,
 * unicast MAC list) with the netdev flags and address lists.  Falls
 * back to promisc/mcast-promisc when the HW filter capacity is
 * exceeded or filter programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* Unicast list changed: rebuild the pmac table from scratch */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* exact mcast filtering works again: drop mcast-promisc */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
ba343c77
SB
1261static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1265 int status;
1266
11ac75ed 1267 if (!sriov_enabled(adapter))
ba343c77
SB
1268 return -EPERM;
1269
11ac75ed 1270 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1271 return -EINVAL;
1272
3175d8c2
SP
1273 if (BEx_chip(adapter)) {
1274 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1275 vf + 1);
ba343c77 1276
11ac75ed
SP
1277 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1278 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1279 } else {
1280 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1281 vf + 1);
590c391d
PR
1282 }
1283
64600ea5 1284 if (status)
ba343c77 1285 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748b539a 1286 mac, vf);
64600ea5 1287 else
11ac75ed 1288 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1289
ba343c77
SB
1290 return status;
1291}
1292
64600ea5 1293static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1294 struct ifla_vf_info *vi)
64600ea5
AK
1295{
1296 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1297 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1298
11ac75ed 1299 if (!sriov_enabled(adapter))
64600ea5
AK
1300 return -EPERM;
1301
11ac75ed 1302 if (vf >= adapter->num_vfs)
64600ea5
AK
1303 return -EINVAL;
1304
1305 vi->vf = vf;
ed616689
SC
1306 vi->max_tx_rate = vf_cfg->tx_rate;
1307 vi->min_tx_rate = 0;
a60b3a13
AK
1308 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1309 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1310 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1311 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
64600ea5
AK
1312
1313 return 0;
1314}
1315
748b539a 1316static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1317{
1318 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1319 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1da87b7f
AK
1320 int status = 0;
1321
11ac75ed 1322 if (!sriov_enabled(adapter))
1da87b7f
AK
1323 return -EPERM;
1324
b9fc0e53 1325 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1326 return -EINVAL;
1327
b9fc0e53
AK
1328 if (vlan || qos) {
1329 vlan |= qos << VLAN_PRIO_SHIFT;
c502224e 1330 if (vf_cfg->vlan_tag != vlan)
b9fc0e53
AK
1331 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1332 vf_cfg->if_handle, 0);
1da87b7f 1333 } else {
f1f3ee1b 1334 /* Reset Transparent Vlan Tagging. */
c502224e
SK
1335 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1336 vf + 1, vf_cfg->if_handle, 0);
1da87b7f
AK
1337 }
1338
c502224e
SK
1339 if (!status)
1340 vf_cfg->vlan_tag = vlan;
1341 else
1da87b7f 1342 dev_info(&adapter->pdev->dev,
c502224e 1343 "VLAN %d config on VF %d failed\n", vlan, vf);
1da87b7f
AK
1344 return status;
1345}
1346
/* ndo_set_vf_rate handler: program a max TX rate for a VF.  A max rate
 * of 0 removes the limit; min_tx_rate is not supported.  The requested
 * rate is validated against the current link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW cannot enforce a minimum rate */
	if (min_tx_rate)
		return -EINVAL;

	/* max_tx_rate == 0 clears the limit; no validation needed */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -EPERM;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return status;
}
bdce2ad7
SR
1408static int be_set_vf_link_state(struct net_device *netdev, int vf,
1409 int link_state)
1410{
1411 struct be_adapter *adapter = netdev_priv(netdev);
1412 int status;
1413
1414 if (!sriov_enabled(adapter))
1415 return -EPERM;
1416
1417 if (vf >= adapter->num_vfs)
1418 return -EINVAL;
1419
1420 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1421 if (!status)
1422 adapter->vf_cfg[vf].plink_tracking = link_state;
1423
1424 return status;
1425}
e1d18735 1426
2632bafd
SP
1427static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1428 ulong now)
6b7c5b94 1429{
2632bafd
SP
1430 aic->rx_pkts_prev = rx_pkts;
1431 aic->tx_reqs_prev = tx_pkts;
1432 aic->jiffies = now;
1433}
ac124ff9 1434
/* Adaptive interrupt coalescing: derive a new event-queue delay (eqd)
 * for each EQ from the RX+TX packets-per-second observed since the
 * last call, clamp it to the configured min/max, and push all changed
 * delays to the HW in one command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the static (ethtool) delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the RX/TX counters under the u64_stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* combined RX+TX pkts/sec over the sampling interval */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low traffic: no coalescing delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Only queue a HW update when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1501
/* Account one RX completion in the per-ring stats, inside the
 * u64_stats update section so 32-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1517
2e588f84 1518static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1519{
19fad86f 1520 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1521 * Also ignore ipcksm for ipv6 pkts
1522 */
2e588f84 1523 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1524 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1525}
1526
/* Pop the next RX fragment's page_info from the ring tail and make its
 * data CPU-visible: a full dma_unmap_page() on the last fragment of a
 * big page, or just a dma_sync for an intermediate fragment.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last fragment of the big page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still mapped: just sync this fragment for the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1552
1553/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1554static void be_rx_compl_discard(struct be_rx_obj *rxo,
1555 struct be_rx_compl_info *rxcp)
6b7c5b94 1556{
6b7c5b94 1557 struct be_rx_page_info *page_info;
2e588f84 1558 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1559
e80d9da6 1560 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1561 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1562 put_page(page_info->page);
1563 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1564 }
1565}
1566
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Tiny packets are copied into the linear area;
 * larger ones keep the header linear and attach the rest as page
 * fragments, coalescing fragments that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the Ethernet header inline, frag the rest */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* single-fragment packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1641
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX fragments, set checksum/hash/
 * vlan metadata and hand it to the stack.  On skb-allocation failure
 * the completion's fragments are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1677
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX fragments to a napi frag-skb (coalescing fragments on
 * the same physical page), set metadata and feed it to GRO.  Falls
 * back to discarding the completion if no frag-skb is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1735
/* Decode a v1-format RX completion descriptor into the driver's
 * chip-independent be_rx_compl_info.  VLAN fields are only valid (and
 * only extracted) when the vtp bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* v1 completions carry the tunneled (vxlan) indication */
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
1767
/* Decode a v0-format (legacy) RX completion descriptor into the
 * driver's chip-independent be_rx_compl_info.  Unlike v1, this format
 * has an ip_frag bit and no tunneled indication.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1799
/* Fetch and parse the next RX completion from rxo's CQ.
 * Returns NULL when no valid entry is pending; otherwise returns a
 * pointer to rxo->rxcp with the decoded fields, after applying the
 * chip-specific VLAN fix-ups. Consumes the CQ entry (valid bit is
 * cleared and the tail is advanced).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* BE/Skyhawk report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the host has
		 * explicitly configured that vlan id
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1844
1829b086 1845static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1846{
6b7c5b94 1847 u32 order = get_order(size);
1829b086 1848
6b7c5b94 1849 if (order > 0)
1829b086
ED
1850 gfp |= __GFP_COMP;
1851 return alloc_pages(gfp, order);
6b7c5b94
SP
1852}
1853
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Stop at MAX_RX_POST or at the first slot still owned by HW */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and DMA-map it once; the
			 * fragments below are carved out of this mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another fragment of the same page: take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the fragment's bus address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* last_frag carries the page's base DMA address so
			 * the whole page can be unmapped on completion
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW about the newly posted buffers */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1932
/* Pop the next valid TX completion from @tx_cq, or return NULL if none
 * is pending. The entry's valid bit is cleared so it is consumed
 * exactly once, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1948
/* Reclaim one completed TX skb: unmap its WRBs (header + fragments)
 * from the TXQ tail through @last_index, free the skb, and return the
 * number of WRBs consumed (caller subtracts this from txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1;	/* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is released only with the first
		 * fragment WRB (and only when there is linear data)
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Safe from any context (including hard irq) */
	dev_kfree_skb_any(sent_skb);
	return num_wrbs;
}
1980
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt-word read before clearing/consuming it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2000
/* Leaves the EQ is disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	/* Ack all pending events without re-arming the EQ */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}
2008
/* Drain an RX object's CQ and release all posted RX buffers.
 * Called during queue teardown, after the RXQ has been asked to flush.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a dead HW */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2057
/* Reap all outstanding TX completions across every TXQ during teardown.
 * Polls until all queues drain or HW has been silent for ~10ms, then
 * force-frees any posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW made progress; restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2117
/* Tear down all event queues: drain pending events, destroy the HW EQ,
 * unregister NAPI, and free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2133
/* Create one event queue per interrupt vector (capped by the configured
 * queue count), register NAPI for each, and initialize the adaptive
 * interrupt-coalescing (AIC) state. Returns 0 or a negative errno from
 * queue allocation/creation; partial setups are cleaned up by the
 * caller via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2167
/* Destroy the MCC (management command channel) queue and its CQ,
 * then free their memory.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2182
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2215
/* Destroy every TX queue and its completion queue, freeing their
 * memory afterwards.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2234
/* Create all TX queues and their completion queues. The TXQ count is
 * capped by both the number of event queues and the HW maximum.
 * Returns 0 or a negative status from queue setup; partial setups are
 * cleaned up by the caller via be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2275
/* Destroy every RX completion queue and free its memory */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2289
/* Create the RX completion queues: one per EQ for RSS rings, plus a
 * default RXQ for non-IP traffic when RSS is in use. Also computes the
 * big-page size used by be_post_rx_frags().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2326
/* INTx interrupt handler. Only the first EQ is serviced in INTx mode
 * (see be_irq_register); @dev is that EQ object.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2358
/* MSI-x interrupt handler: leave the EQ disarmed and hand all event
 * processing to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2367
2e588f84 2368static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2369{
e38b1706 2370 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2371}
2372
/* Consume up to @budget RX completions from @rxo, delivering frames to
 * the stack (GRO when eligible and not busy-polling). Returns the
 * number of completions processed. Re-arms the CQ and replenishes RX
 * buffers when work was done.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2428
/* Reap up to @budget TX completions from @txo (subqueue index @idx).
 * Wakes the netdev subqueue when enough WRBs were freed. Returns true
 * when the CQ was drained within the budget (i.e. TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2462
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to
 * this EQ, processes MCC completions on the MCC EQ, and re-arms the EQ
 * only when all work fit within @budget. Pending events are counted up
 * front and acked in the final be_eq_notify().
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* The napi/busy-poll lock arbitrates with be_busy_poll() */
	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2507
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll RX path. Processes a small batch (4) of RX
 * completions per ring; returns LL_FLUSH_BUSY when NAPI currently owns
 * the rings, otherwise the amount of work done.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2529
/* Poll the adapter for fatal HW/FW errors. On Lancer, errors are read
 * from the SLIPORT status registers; on BE/Skyhawk, from the PCI-config
 * UE (unrecoverable error) status words. Takes the link down when an
 * error is found. No-op once a HW error has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2605
8d56ff11
SP
2606static void be_msix_disable(struct be_adapter *adapter)
2607{
ac6a0c4a 2608 if (msix_enabled(adapter)) {
8d56ff11 2609 pci_disable_msix(adapter->pdev);
ac6a0c4a 2610 adapter->num_msix_vec = 0;
68d7bdcb 2611 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2612 }
2613}
2614
/* Enable MSI-x, requesting enough vectors for the NIC queues and (when
 * supported) RoCE; the kernel may grant fewer, down to
 * MIN_MSIX_VECTORS. Splits the granted vectors between NIC and RoCE.
 * Returns 0 on success, or the pci_enable_msix_range() error for VFs
 * (which cannot fall back to INTx); PFs return 0 even on failure so
 * probe can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE when there is room */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2658
fe6d2a38 2659static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2660 struct be_eq_obj *eqo)
b628bde2 2661{
f2f781a7 2662 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2663}
6b7c5b94 2664
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs registered so far (in reverse order) and
 * disables MSI-x. Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release the vectors acquired before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2688
/* Register the adapter's interrupt handler(s): MSI-x when enabled,
 * falling back to a shared INTx line on the first EQ for PFs.
 * Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2716
/* Release the IRQ(s) acquired by be_irq_register(); no-op if nothing
 * was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx: one vector was requested per event queue */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2739
10ef9ab4 2740static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2741{
2742 struct be_queue_info *q;
2743 struct be_rx_obj *rxo;
2744 int i;
2745
2746 for_all_rx_queues(adapter, rxo, i) {
2747 q = &rxo->q;
2748 if (q->created) {
2749 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2750 be_rx_cq_clean(rxo);
482c9e79 2751 }
10ef9ab4 2752 be_queue_free(adapter, q);
482c9e79
SP
2753 }
2754}
2755
889cd4b2
SP
/* ndo_stop handler: quiesce NAPI/busy-poll, drain TX completions, destroy
 * RX queues, drop extra unicast MACs, sync and clean event queues, then
 * release IRQs. Teardown order mirrors be_open() in reverse.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* delete secondary uc-macs only; index 0 (primary MAC) is kept here
	 * and removed later via be_mac_clear() in be_clear()
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* make sure no in-flight handler is still using each EQ before
	 * cleaning it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2805
/* Allocate and create all RX queues, program the RSS indirection table and
 * hash key (when more than the default RXQ exists), then post the initial
 * set of RX buffers. Returns 0 or a negative errno/FW status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling through the RSS
		 * queues until all RSS_INDIR_TABLE_LEN slots are used
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is only supported on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* remember the key actually programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2871
6b7c5b94
SP
/* ndo_open handler: create RX queues, register IRQs, arm completion and
 * event queues, enable NAPI/busy-poll, then start the TX queues. Any
 * failure unwinds through be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* report the current link state to the stack; best-effort */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2921
71d8d1b5
AK
2922static int be_setup_wol(struct be_adapter *adapter, bool enable)
2923{
2924 struct be_dma_mem cmd;
2925 int status = 0;
2926 u8 mac[ETH_ALEN];
2927
2928 memset(mac, 0, ETH_ALEN);
2929
2930 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2931 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2932 GFP_KERNEL);
71d8d1b5
AK
2933 if (cmd.va == NULL)
2934 return -1;
71d8d1b5
AK
2935
2936 if (enable) {
2937 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2938 PCICFG_PM_CONTROL_OFFSET,
2939 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2940 if (status) {
2941 dev_err(&adapter->pdev->dev,
2381a55c 2942 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2943 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2944 cmd.dma);
71d8d1b5
AK
2945 return status;
2946 }
2947 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2948 adapter->netdev->dev_addr,
2949 &cmd);
71d8d1b5
AK
2950 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2951 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2952 } else {
2953 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2954 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2955 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2956 }
2957
2b7bcebf 2958 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2959 return status;
2960}
2961
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command (errors are logged per-VF but
 * do not abort the loop).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed + 1 in the last octet (may wrap at 0xff) */
		mac[5] += 1;
	}
	return status;
}
2997
/* Read back each VF's currently-active MAC from FW into its vf_cfg entry.
 * Used when VFs already exist (e.g. after a PF reload). Stops at the first
 * FW error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3014
/* Tear down SR-IOV state: disable SR-IOV and destroy each VF's MAC and
 * interface in FW. If VFs are still assigned to guest VMs only the local
 * bookkeeping is freed, leaving the VFs untouched.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* undo whichever MAC-programming method be_vf_eth_addr_config
		 * used for this chip family
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3042
/* Destroy all queues in reverse order of their creation in
 * be_setup_queues() (MCC, RX CQs, TX, then event queues).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3050
68d7bdcb 3051static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3052{
191eb756
SP
3053 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3054 cancel_delayed_work_sync(&adapter->work);
3055 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3056 }
68d7bdcb
SP
3057}
3058
b05004ad 3059static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3060{
3061 int i;
3062
b05004ad
SK
3063 if (adapter->pmac_id) {
3064 for (i = 0; i < (adapter->uc_macs + 1); i++)
3065 be_cmd_pmac_del(adapter, adapter->if_handle,
3066 adapter->pmac_id[i], 0);
3067 adapter->uc_macs = 0;
3068
3069 kfree(adapter->pmac_id);
3070 adapter->pmac_id = NULL;
3071 }
3072}
3073
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the tunnel interface back to a normal
 * interface and clear the VxLAN UDP port programmed in HW, then reset the
 * driver-side state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
c9c47142 3088
/* Full teardown counterpart of be_setup(): stop the worker, clear VFs,
 * rebalance FW resources, drop offloads and MACs, destroy the interface
 * and all queues, and disable MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3117
/* Create a FW interface for every VF. On non-BE3 chips the capability
 * flags come from the per-VF FW profile when one exists; otherwise the
 * default untagged/broadcast/multicast set is used. Stops at first error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3149
/* Allocate the per-VF config array and mark each entry's interface handle
 * and pmac id as invalid (-1) until they are programmed. Returns 0 or
 * -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3166
/* Bring up SR-IOV VFs. If VFs already exist (old_vfs != 0), only re-query
 * their interface ids and MACs; otherwise create interfaces, assign MACs,
 * grant privileges/QoS/link-state and finally enable SR-IOV in PCI. Any
 * failure unwinds through be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survive from a previous driver load: re-learn state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3239
f93f160b
VV
3240/* Converting function_mode bits on BE3 to SH mc_type enums */
3241
3242static u8 be_convert_mc_type(u32 function_mode)
3243{
66064dbc 3244 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3245 return vNIC1;
66064dbc 3246 else if (function_mode & QNQ_MODE)
f93f160b
VV
3247 return FLEX10;
3248 else if (function_mode & VNIC_MODE)
3249 return vNIC2;
3250 else if (function_mode & UMC_ENABLED)
3251 return UMC;
3252 else
3253 return MC_NONE;
3254}
3255
92bf14ab
SP
3256/* On BE2/BE3 FW does not suggest the supported limits */
3257static void BEx_get_resources(struct be_adapter *adapter,
3258 struct be_resources *res)
3259{
bec84e6b 3260 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3261
3262 if (be_physfn(adapter))
3263 res->max_uc_mac = BE_UC_PMAC_COUNT;
3264 else
3265 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3266
f93f160b
VV
3267 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3268
3269 if (be_is_mc(adapter)) {
3270 /* Assuming that there are 4 channels per port,
3271 * when multi-channel is enabled
3272 */
3273 if (be_is_qnq_mode(adapter))
3274 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3275 else
3276 /* In a non-qnq multichannel mode, the pvid
3277 * takes up one vlan entry
3278 */
3279 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3280 } else {
92bf14ab 3281 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3282 }
3283
92bf14ab
SP
3284 res->max_mcast_mac = BE_MAX_MC;
3285
a5243dab
VV
3286 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3287 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3288 * *only* if it is RSS-capable.
3289 */
3290 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3291 !be_physfn(adapter) || (be_is_mc(adapter) &&
3292 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
92bf14ab
SP
3293 res->max_tx_qs = 1;
3294 else
3295 res->max_tx_qs = BE3_MAX_TX_QS;
3296
3297 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3298 !use_sriov && be_physfn(adapter))
3299 res->max_rss_qs = (adapter->be3_native) ?
3300 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3301 res->max_rx_qs = res->max_rss_qs + 1;
3302
e3dc867c 3303 if (be_physfn(adapter))
ecf1f6e1 3304 res->max_evt_qs = (res->max_vfs > 0) ?
e3dc867c
SR
3305 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3306 else
3307 res->max_evt_qs = 1;
92bf14ab
SP
3308
3309 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3310 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3311 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3312}
3313
30128031
SP
3314static void be_setup_init(struct be_adapter *adapter)
3315{
3316 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3317 adapter->phy.link_speed = -1;
30128031
SP
3318 adapter->if_handle = -1;
3319 adapter->be3_native = false;
3320 adapter->promiscuous = false;
f25b119c
PR
3321 if (be_physfn(adapter))
3322 adapter->cmd_privileges = MAX_PRIVILEGES;
3323 else
3324 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3325}
3326
/* Read the SR-IOV resource pool from FW, work around old BE3 FW that does
 * not report max_vfs, and reconcile the num_vfs module parameter with what
 * the device supports and with any VFs already enabled.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status, max_vfs, old_vfs;

	status = be_cmd_get_profile_config(adapter, &res, 0);
	if (status)
		return status;

	adapter->pool_res = res;

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res.max_vfs = res.max_vfs;
	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled (e.g. previous driver load) win over
		 * the module parameter
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3374
/* Populate adapter->res with per-function resource limits: derived locally
 * for BE2/BE3 (BEx), queried from FW for Lancer/Skyhawk.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3411
/* Query FW configuration (port, function mode/caps), set up SR-IOV
 * resource distribution on PFs, read resource limits, and allocate the
 * pmac_id table sized to the max unicast MACs.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		/* active profile query is informational; failure is ignored */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);

		status = be_get_sriov_config(adapter);
		if (status)
			return status;

		/* When the HW is in SRIOV capable configuration, the PF-pool
		 * resources are equally distributed across the max-number of
		 * VFs. The user may request only a subset of the max-vfs to be
		 * enabled. Based on num_vfs, redistribute the resources across
		 * num_vfs so that each VF will have access to more number of
		 * resources. This facility is not available in BE3 FW.
		 * Also, this is done by FW in Lancer chip.
		 */
		if (!pci_num_vf(adapter->pdev)) {
			status = be_cmd_set_sriov_config(adapter,
							 adapter->pool_res,
							 adapter->num_vfs);
			if (status)
				return status;
		}
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3465
/* Establish the interface's primary MAC: read the permanent MAC from FW on
 * first setup (netdev addr still zero), otherwise re-program the existing
 * dev_addr into HW (e.g. after a reset).
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3489
/* Start the periodic (1 s) worker and flag it as scheduled so that
 * be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3495
/* Create all queues in dependency order (EQs, TX, RX CQs, MCC) and tell
 * the stack the real RX/TX queue counts. be_clear_queues() is the inverse.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3530
/* Recreate all queues with the current configuration (e.g. after a channel
 * count change): close if running, destroy queues, re-enable MSI-x if
 * possible, rebuild queues and reopen.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3566
/* Main one-time/resume initialization path: query config, enable MSI-x,
 * create the FW interface and all queues, program MACs/VLANs/flow-control,
 * bring up VFs if requested, and start the periodic worker. Any failure
 * unwinds through be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* enable only the flags the function actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* very old BE2 FW has known interrupt problems; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* re-apply the desired flow-control if HW state differs */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3648
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler (netconsole/netpoll path): arm each event
 * queue and schedule its NAPI context so completions are processed even
 * when normal interrupt delivery cannot be relied upon.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant bare "return" at end of void function removed */
}
#endif
3664
/* 32-byte signature that marks the start of a flash section directory in a
 * firmware (UFI) image; matched by get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3666
/* True for TN-8022 PHYs on a 10GBase-T interface — presumably the only
 * combination whose PHY firmware is flashed by this driver (NOTE: inferred
 * from the name; confirm against the flash code paths).
 */
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
3672
c165541e
PR
3673static bool is_comp_in_ufi(struct be_adapter *adapter,
3674 struct flash_section_info *fsec, int type)
3675{
3676 int i = 0, img_type = 0;
3677 struct flash_section_info_g2 *fsec_g2 = NULL;
3678
ca34fe38 3679 if (BE2_chip(adapter))
c165541e
PR
3680 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3681
3682 for (i = 0; i < MAX_FLASH_COMP; i++) {
3683 if (fsec_g2)
3684 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3685 else
3686 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3687
3688 if (img_type == type)
3689 return true;
3690 }
3691 return false;
3692
3693}
3694
4188e7df 3695static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3696 int header_size,
3697 const struct firmware *fw)
c165541e
PR
3698{
3699 struct flash_section_info *fsec = NULL;
3700 const u8 *p = fw->data;
3701
3702 p += header_size;
3703 while (p < (fw->data + fw->size)) {
3704 fsec = (struct flash_section_info *)p;
3705 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3706 return fsec;
3707 p += 32;
3708 }
3709 return NULL;
3710}
3711
96c9b2e4
VV
3712static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3713 u32 img_offset, u32 img_size, int hdr_size,
3714 u16 img_optype, bool *crc_match)
3715{
3716 u32 crc_offset;
3717 int status;
3718 u8 crc[4];
3719
3720 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3721 if (status)
3722 return status;
3723
3724 crc_offset = hdr_size + img_offset + img_size - 4;
3725
3726 /* Skip flashing, if crc of flashed region matches */
3727 if (!memcmp(crc, p + crc_offset, 4))
3728 *crc_match = true;
3729 else
3730 *crc_match = false;
3731
3732 return status;
3733}
3734
/* Write an image to flash in 32 KB chunks through the FW mailbox buffer.
 * Intermediate chunks use a SAVE op; the final chunk uses a FLASH op that
 * commits the image. Returns 0 on success or the FW status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk commits; earlier chunks only stage data */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		/* FW rejecting a PHY-FW write is treated as "not needed" */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3772
/* For BE2, BE3 and BE3-R */
/* Flashes every component of a gen2/gen3 UFI image that is present in
 * the file and applicable to this adapter. Components are looked up in
 * a per-generation table of (offset, optype, max size, image type).
 * Returns 0 on success, -1 on a corrupt image, or the flash status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* BE3/BE3-R use the gen3 layout; BE2 uses gen2 */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not packaged in this UFI file */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires a minimum adapter FW version */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Avoid rewriting the boot loader if it is unchanged;
		 * a CRC-read failure just skips this component.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3889
96c9b2e4
VV
3890static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3891{
3892 u32 img_type = le32_to_cpu(fsec_entry.type);
3893 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3894
3895 if (img_optype != 0xFFFF)
3896 return img_optype;
3897
3898 switch (img_type) {
3899 case IMAGE_FIRMWARE_iSCSI:
3900 img_optype = OPTYPE_ISCSI_ACTIVE;
3901 break;
3902 case IMAGE_BOOT_CODE:
3903 img_optype = OPTYPE_REDBOOT;
3904 break;
3905 case IMAGE_OPTION_ROM_ISCSI:
3906 img_optype = OPTYPE_BIOS;
3907 break;
3908 case IMAGE_OPTION_ROM_PXE:
3909 img_optype = OPTYPE_PXE_BIOS;
3910 break;
3911 case IMAGE_OPTION_ROM_FCoE:
3912 img_optype = OPTYPE_FCOE_BIOS;
3913 break;
3914 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3915 img_optype = OPTYPE_ISCSI_BACKUP;
3916 break;
3917 case IMAGE_NCSI:
3918 img_optype = OPTYPE_NCSI_FW;
3919 break;
3920 case IMAGE_FLASHISM_JUMPVECTOR:
3921 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3922 break;
3923 case IMAGE_FIRMWARE_PHY:
3924 img_optype = OPTYPE_SH_PHY_FW;
3925 break;
3926 case IMAGE_REDBOOT_DIR:
3927 img_optype = OPTYPE_REDBOOT_DIR;
3928 break;
3929 case IMAGE_REDBOOT_CONFIG:
3930 img_optype = OPTYPE_REDBOOT_CONFIG;
3931 break;
3932 case IMAGE_UFI_DIR:
3933 img_optype = OPTYPE_UFI_DIR;
3934 break;
3935 default:
3936 break;
3937 }
3938
3939 return img_optype;
3940}
3941
/* Flashes a Skyhawk (gen4) UFI: walks the flash-section table in the
 * file and flashes each recognized image, skipping images whose CRC
 * already matches what is on flash. Old-format UFIs (entry optype ==
 * 0xFFFF) skip the CRC check and tolerate certain flash errors.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* 0xFFFF after mapping means unrecognized image: skip */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Image on flash is already up to date */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the image against the file size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4023
/* Downloads firmware to a Lancer adapter: streams the image to the
 * "/prg" flash location in 32KB chunks through a DMA buffer, commits
 * it with a zero-length write, then (if the FW says so) resets the
 * adapter to activate the new image. Returns 0 on success or a
 * negative errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Lancer requires the image length to be a multiple of 4 */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the command header plus one chunk of data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may
		 * differ from chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* The FW reports whether an adapter reset (driver-triggered) or
	 * a full system reboot is needed to activate the new image.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4120
ca34fe38
SP
4121#define UFI_TYPE2 2
4122#define UFI_TYPE3 3
0ad3157e 4123#define UFI_TYPE3R 10
ca34fe38
SP
4124#define UFI_TYPE4 4
4125static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4126 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
4127{
4128 if (fhdr == NULL)
4129 goto be_get_ufi_exit;
4130
ca34fe38
SP
4131 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4132 return UFI_TYPE4;
0ad3157e
VV
4133 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4134 if (fhdr->asic_type_rev == 0x10)
4135 return UFI_TYPE3R;
4136 else
4137 return UFI_TYPE3;
4138 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4139 return UFI_TYPE2;
773a2d7c
PR
4140
4141be_get_ufi_exit:
4142 dev_err(&adapter->pdev->dev,
4143 "UFI and Interface are not compatible for flashing\n");
4144 return -1;
4145}
4146
/* Downloads firmware to BE2/BE3/BE3-R/Skyhawk adapters: determines
 * the UFI type from the file header and dispatches to the matching
 * per-generation flashing routine. Returns 0 on success, -ENOMEM on
 * allocation failure, or -1 / flash status on error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only the image-header entries with imageid == 1 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 (BE2) files have no image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4215
4216int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4217{
4218 const struct firmware *fw;
4219 int status;
4220
4221 if (!netif_running(adapter->netdev)) {
4222 dev_err(&adapter->pdev->dev,
4223 "Firmware load not allowed (interface is down)\n");
4224 return -1;
4225 }
4226
4227 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4228 if (status)
4229 goto fw_exit;
4230
4231 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4232
4233 if (lancer_chip(adapter))
4234 status = lancer_fw_download(adapter, fw);
4235 else
4236 status = be_fw_download(adapter, fw);
4237
eeb65ced
SK
4238 if (!status)
4239 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4240 adapter->fw_on_flash);
4241
84517482
AK
4242fw_exit:
4243 release_firmware(fw);
4244 return status;
4245}
4246
748b539a 4247static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4248{
4249 struct be_adapter *adapter = netdev_priv(dev);
4250 struct nlattr *attr, *br_spec;
4251 int rem;
4252 int status = 0;
4253 u16 mode = 0;
4254
4255 if (!sriov_enabled(adapter))
4256 return -EOPNOTSUPP;
4257
4258 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4259
4260 nla_for_each_nested(attr, br_spec, rem) {
4261 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4262 continue;
4263
4264 mode = nla_get_u16(attr);
4265 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4266 return -EINVAL;
4267
4268 status = be_cmd_set_hsw_config(adapter, 0, 0,
4269 adapter->if_handle,
4270 mode == BRIDGE_MODE_VEPA ?
4271 PORT_FWD_TYPE_VEPA :
4272 PORT_FWD_TYPE_VEB);
4273 if (status)
4274 goto err;
4275
4276 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4277 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4278
4279 return status;
4280 }
4281err:
4282 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4283 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4284
4285 return status;
4286}
4287
4288static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4289 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4290{
4291 struct be_adapter *adapter = netdev_priv(dev);
4292 int status = 0;
4293 u8 hsw_mode;
4294
4295 if (!sriov_enabled(adapter))
4296 return 0;
4297
4298 /* BE and Lancer chips support VEB mode only */
4299 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4300 hsw_mode = PORT_FWD_TYPE_VEB;
4301 } else {
4302 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4303 adapter->if_handle, &hsw_mode);
4304 if (status)
4305 return 0;
4306 }
4307
4308 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4309 hsw_mode == PORT_FWD_TYPE_VEPA ?
4310 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4311}
4312
c5abe7c0 4313#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN add-port notifier: converts the interface into tunnel mode
 * and programs the UDP port for VxLAN checksum/TSO offloads. Only one
 * offloaded port is supported at a time, and only on Skyhawk-family
 * chips (Lancer/BEx bail out early). On any failure the offloads are
 * torn down entirely.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* HW supports a single offloaded VxLAN port */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
	return;
}
4354
4355static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4356 __be16 port)
4357{
4358 struct be_adapter *adapter = netdev_priv(netdev);
4359
4360 if (lancer_chip(adapter) || BEx_chip(adapter))
4361 return;
4362
4363 if (adapter->vxlan_port != port)
4364 return;
4365
4366 be_disable_vxlan_offloads(adapter);
4367
4368 dev_info(&adapter->pdev->dev,
4369 "Disabled VxLAN offloads for UDP port %d\n",
4370 be16_to_cpu(port));
4371}
c5abe7c0 4372#endif
c9c47142 4373
/* netdev callback table shared by all be2net devices; wired up in
 * be_netdev_init(). VxLAN and busy-poll entries are compiled in only
 * when the corresponding kernel options are enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4403
/* One-time net_device setup: advertises offload feature flags
 * (including VxLAN encapsulation offloads on Skyhawk), sets device
 * flags, and installs the netdev/ethtool op tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only Skyhawk supports checksum/TSO on encapsulated traffic */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering are always-on, not toggleable */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4436
/* Unmaps whichever BARs be_map_pci_bars() mapped (CSR is only mapped
 * on BEx physical functions, so it may legitimately be NULL).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
4444
/* Returns the PCI BAR index holding the doorbell region: BAR 0 on
 * Lancer and on virtual functions, BAR 4 on BE physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4452
4453static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4454{
dbf0f2a7 4455 if (skyhawk_chip(adapter)) {
ce66f781
SP
4456 adapter->roce_db.size = 4096;
4457 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4458 db_bar(adapter));
4459 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4460 db_bar(adapter));
4461 }
045508a8 4462 return 0;
6b7c5b94
SP
4463}
4464
4465static int be_map_pci_bars(struct be_adapter *adapter)
4466{
4467 u8 __iomem *addr;
fe6d2a38 4468
c5b3ad4c
SP
4469 if (BEx_chip(adapter) && be_physfn(adapter)) {
4470 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4471 if (adapter->csr == NULL)
4472 return -ENOMEM;
4473 }
4474
ce66f781 4475 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4476 if (addr == NULL)
4477 goto pci_map_err;
ba343c77 4478 adapter->db = addr;
ce66f781
SP
4479
4480 be_roce_map_pci_bars(adapter);
6b7c5b94 4481 return 0;
ce66f781 4482
6b7c5b94
SP
4483pci_map_err:
4484 be_unmap_pci_bars(adapter);
4485 return -ENOMEM;
4486}
4487
6b7c5b94
SP
4488static void be_ctrl_cleanup(struct be_adapter *adapter)
4489{
8788fdc2 4490 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4491
4492 be_unmap_pci_bars(adapter);
4493
4494 if (mem->va)
2b7bcebf
IV
4495 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4496 mem->dma);
e7b909a6 4497
5b8821b7 4498 mem = &adapter->rx_filter;
e7b909a6 4499 if (mem->va)
2b7bcebf
IV
4500 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4501 mem->dma);
6b7c5b94
SP
4502}
4503
/* Sets up the driver's control path: reads the SLI interface config,
 * maps the PCI BARs, allocates the (16-byte aligned) mailbox and
 * rx-filter DMA buffers, and initializes the mailbox/MCC locks.
 * Uses goto-based cleanup so each failure path frees exactly what
 * was acquired. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved config state is restored after an EEH/error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4562
4563static void be_stats_cleanup(struct be_adapter *adapter)
4564{
3abcdeda 4565 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4566
4567 if (cmd->va)
2b7bcebf
IV
4568 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4569 cmd->va, cmd->dma);
6b7c5b94
SP
4570}
4571
4572static int be_stats_init(struct be_adapter *adapter)
4573{
3abcdeda 4574 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4575
ca34fe38
SP
4576 if (lancer_chip(adapter))
4577 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4578 else if (BE2_chip(adapter))
89a88ab8 4579 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4580 else if (BE3_chip(adapter))
ca34fe38 4581 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4582 else
4583 /* ALL non-BE ASICs */
4584 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4585
ede23fa8
JP
4586 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4587 GFP_KERNEL);
6b7c5b94
SP
4588 if (cmd->va == NULL)
4589 return -1;
4590 return 0;
4591}
4592
/* PCI remove handler. Teardown order is significant: the recovery
 * worker must be cancelled before the netdev is unregistered, the FW
 * must be told we are done before control structures are freed, and
 * the netdev is freed last since adapter lives inside it.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4623
39f1d94d 4624static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4625{
baaa08d1 4626 int status, level;
6b7c5b94 4627
9e1453c5
AK
4628 status = be_cmd_get_cntl_attributes(adapter);
4629 if (status)
4630 return status;
4631
7aeb2156
PR
4632 /* Must be a power of 2 or else MODULO will BUG_ON */
4633 adapter->be_get_temp_freq = 64;
4634
baaa08d1
VV
4635 if (BEx_chip(adapter)) {
4636 level = be_cmd_get_fw_log_level(adapter);
4637 adapter->msg_enable =
4638 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4639 }
941a77d5 4640
92bf14ab 4641 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4642 return 0;
6b7c5b94
SP
4643}
4644
/* Recovers a Lancer adapter after a firmware error: waits for the FW
 * to reach the ready state, tears down and re-creates the whole
 * driver setup, and re-opens the interface if it was running.
 * Returns 0 on success; -EAGAIN means resources are still being
 * provisioned and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4681
/* Periodic (1s) error-detection worker. On Lancer, a detected HW
 * error triggers a full recovery attempt with the netdev detached;
 * the work re-arms itself unless recovery failed with a fatal
 * (non -EAGAIN) error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while it is being recovered.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4708
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off async stats / die-temperature queries,
 * replenishes starved RX queues and updates EQ delay (interrupt
 * moderation).  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF-only: sample die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4751
257a3feb 4752/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4753static bool be_reset_required(struct be_adapter *adapter)
4754{
257a3feb 4755 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4756}
4757
d379142b
SP
4758static char *mc_name(struct be_adapter *adapter)
4759{
f93f160b
VV
4760 char *str = ""; /* default */
4761
4762 switch (adapter->mc_type) {
4763 case UMC:
4764 str = "UMC";
4765 break;
4766 case FLEX10:
4767 str = "FLEX10";
4768 break;
4769 case vNIC1:
4770 str = "vNIC-1";
4771 break;
4772 case nPAR:
4773 str = "nPAR";
4774 break;
4775 case UFP:
4776 str = "UFP";
4777 break;
4778 case vNIC2:
4779 str = "vNIC-2";
4780 break;
4781 default:
4782 str = "";
4783 }
4784
4785 return str;
d379142b
SP
4786}
4787
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4792
/* PCI probe: bring up one NIC function.
 *
 * Order matters: PCI enable/regions -> netdev alloc -> DMA mask ->
 * AER (PF only) -> ctrl (mailbox) init -> FW-ready sync -> optional
 * function-level reset -> enable interrupts -> FW init cmd -> stats DMA
 * buffer -> initial config -> be_setup (queues/filters) -> register
 * netdev.  Failures unwind through the goto ladder in reverse order.
 * Returns 0 on success, negative errno otherwise.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* Skip the FLR when VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Expose this function to the RoCE driver */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4914
/* Legacy PM suspend: arm WoL if enabled, mask interrupts, stop the
 * recovery worker, quiesce and tear down the netdev/HW resources, then
 * put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Ensure the recovery worker is idle before tearing down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4939
/* Legacy PM resume: mirror of be_suspend().  Re-enable the PCI device,
 * wait for FW, rebuild HW resources with be_setup(), reopen if the
 * interface was running, restart the recovery worker and disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FW must be ready before any mailbox commands are issued */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
4981
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop both workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR the function so all DMA stops before power-off/kexec */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5001
/* EEH (PCI error recovery) callback: a fatal PCI channel error was
 * detected.  Quiesce the device exactly once (eeh_error latch), then
 * tell the EEH core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5040
/* EEH slot-reset callback: re-enable the freshly reset PCI function,
 * restore config space, wait for FW readiness and clear recorded error
 * state.  Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5067
/* EEH resume callback: traffic may flow again.  FLR the function,
 * re-enable interrupts, rebuild HW resources and reopen the interface.
 * On any failure only an error is logged; the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5110
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5116
/* PCI driver entry points for all supported BE2/BE3/Lancer/Skyhawk IDs */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5127
5128static int __init be_init_module(void)
5129{
8e95a202
JP
5130 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5131 rx_frag_size != 2048) {
6b7c5b94
SP
5132 printk(KERN_WARNING DRV_NAME
5133 " : Module param rx_frag_size must be 2048/4096/8192."
5134 " Using 2048\n");
5135 rx_frag_size = 2048;
5136 }
6b7c5b94
SP
5137
5138 return pci_register_driver(&be_driver);
5139}
5140module_init(be_init_module);
5141
/* Module exit point: unregister the PCI driver (removes all devices) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);