// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
#ifdef CONFIG_BE2NET_BE2
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
#endif /* CONFIG_BE2NET_BE2 */
#ifdef CONFIG_BE2NET_BE3
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
#endif /* CONFIG_BE2NET_BE3 */
#ifdef CONFIG_BE2NET_LANCER
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
#endif /* CONFIG_BE2NET_LANCER */
#ifdef CONFIG_BE2NET_SKYHAWK
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
#endif /* CONFIG_BE2NET_SKYHAWK */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

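/* Queue rings are backed by a single coherent DMA buffer; the two helpers
 * below allocate and free that backing memory along with the ring state.
 */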
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     &mem->dma, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

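/* Host interrupt control: be_intr_set() below first asks the FW via
 * be_cmd_intr_set(); if that cmd fails, it falls back to toggling the
 * host-interrupt enable bit directly in PCI config space (this helper).
 */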
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

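/* Doorbell helpers: each routine below composes a doorbell value and
 * writes it to the adapter's doorbell BAR. The wmb() before the RQ/TXQ
 * doorbells orders the ring-entry writes ahead of the producer update.
 */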
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

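/* ndo_set_mac_address handler: programs the new MAC via be_dev_mac_add(),
 * then queries the FW for the active MAC to confirm the change actually
 * took effect before committing it to netdev->dev_addr.
 */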
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	eth_hw_addr_set(netdev, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

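/* Fold a 16-bit HW counter into a 32-bit accumulator: the low 16 bits
 * mirror the last HW value, and a wrap (new value smaller than the old
 * low half) adds 65536, so the upper half counts the wrap-arounds.
 */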
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	WRITE_ONCE(*acc, newacc);
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

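/* ndo_get_stats64 handler: per-queue counters are read inside a
 * u64_stats_fetch_begin/retry loop so a concurrent writer on another CPU
 * is never observed mid-update; driver-level error counters are then
 * folded into the rtnl_link_stats64 error fields.
 */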
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_tcp_all_headers(skb);

	return skb_tcp_all_headers(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

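/* Returns the VLAN tag to place in the Tx WRB. If the 802.1p priority
 * requested by the stack is not in the adapter's available priority
 * bitmap, it is replaced with the FW-recommended priority bits.
 */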
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = skb_vlan_tag_get_prio(skb);
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

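/* TXQ occupancy checks: the queue is treated as full once fewer than
 * BE_MAX_TX_FRAG_COUNT free slots remain (a worst-case packet must still
 * fit), and be_can_txq_wake() reports it drained below half its length.
 */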
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	u32 head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	bool insert_vlan = false;
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		insert_vlan = true;
	}

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!insert_vlan) {
			vlan_tag = adapter->pvid;
			insert_vlan = true;
		}
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (insert_vlan) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		__vlan_hwaccel_clear_tag(skb);
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || BE3_chip(adapter) ||
	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
			goto tx_drop;
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

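/* Ring the Tx doorbell for all WRBs pended since the last notify. On
 * BE chips a dummy WRB is composed first when the pending count is odd,
 * so that an even number of WRBs is always notified to the HW.
 */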
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

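/* ndo_start_xmit handler: applies the chip-specific workarounds, enqueues
 * the skb's WRBs (a second time with the mgmt bit when the packet must
 * also reach the BMC), and defers ringing the Tx doorbell until the stack
 * signals the end of a batch via netdev_xmit_more().
 */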
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !netdev_xmit_more();
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct sk_buff *skb;
	struct tcphdr *tcphdr;
	struct udphdr *udphdr;
	u32 *entry;
	int status;
	int i, j;

	for_all_tx_queues(adapter, txo, i) {
		dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
			 i, txo->q.head, txo->q.tail,
			 atomic_read(&txo->q.used), txo->q.id);

		entry = txo->q.dma_mem.va;
		for (j = 0; j < TX_Q_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		entry = txo->cq.dma_mem.va;
		dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
			 i, txo->cq.head, txo->cq.tail,
			 atomic_read(&txo->cq.used));
		for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		for (j = 0; j < TX_Q_LEN; j++) {
			if (txo->sent_skb_list[j]) {
				skb = txo->sent_skb_list[j];
				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
					tcphdr = tcp_hdr(skb);
					dev_info(dev, "TCP source port %d\n",
						 ntohs(tcphdr->source));
					dev_info(dev, "TCP dest port %d\n",
						 ntohs(tcphdr->dest));
					dev_info(dev, "TCP sequence num %d\n",
						 ntohs(tcphdr->seq));
					dev_info(dev, "TCP ack_seq %d\n",
						 ntohs(tcphdr->ack_seq));
				} else if (ip_hdr(skb)->protocol ==
					   IPPROTO_UDP) {
					udphdr = udp_hdr(skb);
					dev_info(dev, "UDP source port %d\n",
						 ntohs(udphdr->source));
					dev_info(dev, "UDP dest port %d\n",
						 ntohs(udphdr->dest));
				}
				dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
					 j, skb, skb->len, skb->protocol);
			}
		}
	}

	if (lancer_chip(adapter)) {
		dev_info(dev, "Initiating reset due to tx timeout\n");
		dev_info(dev, "Resetting adapter\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status)
			dev_err(dev, "Reset failed .. Reboot server\n");
	}
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}

/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}

static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
1702 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1703 /* Update mc-list unconditionally if the iface was previously
1704 * in mc-promisc mode and now is out of that mode.
1705 */
1706 adapter->update_mc_list = true;
1707 }
1708
b7172414
SP
1709 if (adapter->update_mc_list) {
1710 int i = 0;
1711
1712 /* cache the mc-list in adapter */
1713 netdev_for_each_mc_addr(ha, netdev) {
1714 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1715 i++;
1716 }
1717 adapter->mc_count = netdev_mc_count(netdev);
1718 }
1719 netif_addr_unlock_bh(netdev);
1720
92fbb1df 1721 if (mc_promisc) {
f66b7cfd 1722 be_set_mc_promisc(adapter);
92fbb1df
SB
1723 } else if (adapter->update_mc_list) {
1724 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1725 if (!status)
1726 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1727 else
1728 be_set_mc_promisc(adapter);
1729
1730 adapter->update_mc_list = false;
1731 }
1732}
1733
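/* be_set_mc_list() above, summarized as a decision table:
 *
 *   netdev state                              action
 *   ----------------------------------------  ---------------------------
 *   IFF_PROMISC set                           no mc-list update needed
 *   IFF_ALLMULTI or mc_count > be_max_mc()    enter mcast-promisc
 *   leaving mcast-promisc                     re-program the full mc-list
 *   mc-list changed (sync callback fired)     re-program the full mc-list
 */
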
static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}

static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
		return 0;
	}

	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
			       adapter->if_handle,
			       &adapter->pmac_id[uc_idx + 1], 0);
}

static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
	if (pmac_id == adapter->pmac_id[0])
		return;

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}

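/* Note on the uc-list handling above: pmac_id[0] always holds the
 * primary MAC programmed at interface creation, so the uc-list only
 * owns slots [1..be_max_uc() - 1]; that is why be_set_uc_list() checks
 * against be_max_uc() - 1, and why be_uc_mac_add() aliases an entry
 * equal to dev_mac to slot 0 instead of consuming another HW filter.
 */
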
static void be_clear_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	__dev_uc_unsync(netdev, NULL);
	for (i = 0; i < adapter->uc_macs; i++)
		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

	adapter->uc_macs = 0;
}

static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}

static void be_work_set_rx_mode(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);

	__be_set_rx_mode(cmd_work->adapter);
	kfree(cmd_work);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

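/* Typical host-side usage of be_set_vf_vlan() above, via the standard
 * ndo_set_vf_vlan path (iproute2):
 *
 *   ip link set eth0 vf 0 vlan 100 qos 3   # transparent tag 100, prio 3
 *   ip link set eth0 vf 0 vlan 0           # disable transparent tagging
 *
 * vlan == 0 && qos == 0 selects be_clear_vf_tvt(); any other
 * combination packs the priority into bits 15:13 (VLAN_PRIO_SHIFT) and
 * enables transparent tagging through be_set_vf_tvt().
 */
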
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

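/* Worked example for the rate checks above: on a 10Gbps link,
 * link_speed = 10000 so percent_rate = 100. On Skyhawk a request of
 * 2500 Mbps passes (2500 % 100 == 0, i.e. 25% of line rate) while
 * 2550 Mbps fails with -EINVAL; other chips accept any value in
 * [100, link_speed].
 */
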
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!adapter->aic_enabled) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}

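/* Worked example of the AIC math in be_get_new_eqd() above: at a
 * combined 300k pkts/s, eqd = (300000 / 15000) << 2 = 80 usec of EQ
 * delay before clamping to [min_eqd, max_eqd]. Below 30k pkts/s the
 * raw value drops under 8 and is forced to 0, favouring latency over
 * interrupt batching on quiet event queues.
 */
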
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!adapter->aic_enabled)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

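/* The firmware takes the EQ delay as a multiplier rather than in usec:
 * delay_multiplier = eqd * 65 / 100, so eqd = 80 usec is sent as 52.
 * Only EQs whose delay actually changed (or all EQs when force_update
 * is set) are batched into one MODIFY_EQ_DELAY command.
 */
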
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non-TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

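/* csum_passed() above as a truth table: CHECKSUM_UNNECESSARY is set
 * only when the frame is TCP or UDP, the HW L4 checksum passed, the IP
 * checksum passed (or the packet is IPv6, which has no IP checksum)
 * and the completion carries no error; every other combination ends in
 * skb_checksum_none_assert() in be_rx_compl_process().
 */
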
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
					page_info->page,
					page_info->page_offset + hdr_len,
					curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
						page_info->page,
						page_info->page_offset,
						curr_frag_len);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
			skb_frag_size_add(&skb_shinfo(skb)->frags[j],
					  curr_frag_len);
		}

		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

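/* Example for skb_fill_rx_data() above: a 6000-byte frame with the
 * default rx_frag_size of 2048 arrives as three RX frags. The Ethernet
 * header lands in the skb linear area, the rest of frag 0 becomes
 * frags[0], and later frags either open a new skb frag slot (fresh
 * page, page_offset == 0) or are merged into the current slot with
 * skb_frag_size_add(), so nr_frags counts pages, not HW descriptors.
 */
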
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
						page_info->page,
						page_info->page_offset,
						curr_frag_len);
		} else {
			put_page(page_info->page);
			skb_frag_size_add(&skb_shinfo(skb)->frags[j],
					  curr_frag_len);
		}

		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

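/* Sizing note for be_post_rx_frags() above: with the default
 * rx_frag_size of 2048 and 4K pages, big_page_size is 4096, so each
 * page backs two RX descriptors. Each additional frag takes another
 * page reference via get_page(), but only the descriptor marked
 * last_frag owns the DMA mapping, which is why get_rx_page_info()
 * unmaps on last_frag and merely syncs otherwise.
 */
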
static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case LANCER_TX_COMP_SGE_ERR:
		tx_stats(txo)->tx_sge_err++;
		break;
	}
}

static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
						struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	if (txcp->status) {
		if (lancer_chip(adapter)) {
			lancer_update_tx_err(txo, txcp->status);
			/* Reset the adapter in case of TSO,
			 * SGE or Parity error
			 */
			if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
			    txcp->status == LANCER_TX_COMP_PARITY_ERR ||
			    txcp->status == LANCER_TX_COMP_SGE_ERR)
				be_set_error(adapter, BE_ERROR_TX);
		} else {
			be_update_tx_err(txo, txcp->status);
		}
	}

	if (be_check_error(adapter, BE_ERROR_TX))
		return NULL;

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}

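/* The valid-bit handshake in be_tx_compl_get() above is the standard
 * producer/consumer pattern for this HW: the adapter writes the dword
 * holding the valid bit last, the driver tests it first and then
 * issues rmb() so the other dwords are not read speculatively ahead of
 * it, and finally clears the bit so a recycled CQ entry is never
 * mistaken for a new completion.
 */
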
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(adapter, txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	adapter->aic_enabled = true;

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}

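/* be_process_rx() above honours the NAPI budget: it stops after
 * 'budget' completions, arms the CQ for exactly the entries it
 * consumed, and replenishes RX buffers once the queue dips below
 * RX_FRAGS_REFILL_WM. be_poll() below uses the per-RXQ work count to
 * choose between napi_complete_done() and staying in polling mode.
 */
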
512bb8a2 3265
c8f64615
SP
3266static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3267 int idx)
6b7c5b94 3268{
c8f64615 3269 int num_wrbs = 0, work_done = 0;
152ffe5b 3270 struct be_tx_compl_info *txcp;
c8f64615 3271
ffc39620 3272 while ((txcp = be_tx_compl_get(adapter, txo))) {
152ffe5b 3273 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 3274 work_done++;
10ef9ab4 3275 }
6b7c5b94 3276
10ef9ab4
SP
3277 if (work_done) {
3278 be_cq_notify(adapter, txo->cq.id, true, work_done);
3279 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 3280
10ef9ab4
SP
3281 /* As Tx wrbs have been freed up, wake up netdev queue
3282 * if it was stopped due to lack of tx wrbs. */
3283 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 3284 be_can_txq_wake(txo)) {
10ef9ab4 3285 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 3286 }
10ef9ab4
SP
3287
3288 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3289 tx_stats(txo)->tx_compl += work_done;
3290 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 3291 }
10ef9ab4 3292}
6b7c5b94 3293
68d7bdcb 3294int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3295{
3296 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3297 struct be_adapter *adapter = eqo->adapter;
0b545a62 3298 int max_work = 0, work, i, num_evts;
6384a4d0 3299 struct be_rx_obj *rxo;
a4906ea0 3300 struct be_tx_obj *txo;
20947770 3301 u32 mult_enc = 0;
f31e50a8 3302
0b545a62
SP
3303 num_evts = events_get(eqo);
3304
a4906ea0
SP
3305 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3306 be_process_tx(adapter, txo, i);
f31e50a8 3307
fb6113e6
ED
3308 /* This loop will iterate twice for EQ0, for which
3309 * completions of the last RXQ (the default one) are also processed.
3310 * For other EQs the loop iterates only once.
3311 */
3312 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3313 work = be_process_rx(rxo, napi, budget);
3314 max_work = max(work, max_work);
10ef9ab4 3315 }
6b7c5b94 3316
10ef9ab4
SP
3317 if (is_mcc_eqo(eqo))
3318 be_process_mcc(adapter);
93c86700 3319
10ef9ab4 3320 if (max_work < budget) {
6ad20165 3321 napi_complete_done(napi, max_work);
20947770
PR
3322
3323 /* The Skyhawk EQ_DB allows setting the re-arm to interrupt
3324 * delay via a delay multiplier encoding value.
3325 */
3326 if (skyhawk_chip(adapter))
3327 mult_enc = be_get_eq_delay_mult_enc(eqo);
3328
3329 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3330 mult_enc);
10ef9ab4
SP
3331 } else {
3332 /* As we'll continue in polling mode, count and clear events */
20947770 3333 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3334 }
10ef9ab4 3335 return max_work;
6b7c5b94
SP
3336}
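
/* Hook-up sketch (illustrative; the actual registration lives in
 * be_evt_queues_create(), which is not shown here): each EQ object
 * carries the NAPI context that lands in be_poll() above, e.g.
 *
 *	netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
 */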
3337
f67ef7ba 3338void be_detect_error(struct be_adapter *adapter)
7c185276 3339{
e1cfb67a
PR
3340 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3341 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
eb0eecc1 3342 struct device *dev = &adapter->pdev->dev;
673c96e5
SR
3343 u16 val;
3344 u32 i;
7c185276 3345
954f6825 3346 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3347 return;
3348
e1cfb67a
PR
3349 if (lancer_chip(adapter)) {
3350 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3351 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3352 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3353 sliport_err1 = ioread32(adapter->db +
748b539a 3354 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3355 sliport_err2 = ioread32(adapter->db +
748b539a 3356 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3357 /* Do not log error messages if it's a FW reset */
3358 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3359 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
c1b3bdb2 3360 dev_info(dev, "Reset is in progress\n");
eb0eecc1 3361 } else {
eb0eecc1
SK
3362 dev_err(dev, "Error detected in the card\n");
3363 dev_err(dev, "ERR: sliport status 0x%x\n",
3364 sliport_status);
3365 dev_err(dev, "ERR: sliport error1 0x%x\n",
3366 sliport_err1);
3367 dev_err(dev, "ERR: sliport error2 0x%x\n",
3368 sliport_err2);
3369 }
e1cfb67a
PR
3370 }
3371 } else {
25848c90
SR
3372 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3373 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3374 ue_lo_mask = ioread32(adapter->pcicfg +
3375 PCICFG_UE_STATUS_LOW_MASK);
3376 ue_hi_mask = ioread32(adapter->pcicfg +
3377 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3378
f67ef7ba
PR
3379 ue_lo = (ue_lo & ~ue_lo_mask);
3380 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3381
eb0eecc1 3382 if (ue_lo || ue_hi) {
673c96e5
SR
3383 /* On certain platforms BE3 hardware can indicate
3384 * spurious UEs. In case of a UE in the chip,
3385 * the POST register correctly reports either a
3386 * FAT_LOG_START state (FW is currently dumping
3387 * FAT log data) or an ARMFW_UE state. Check for the
3388 * above states to ascertain if the UE is valid or not.
3389 */
3390 if (BE3_chip(adapter)) {
3391 val = be_POST_stage_get(adapter);
3392 if ((val & POST_STAGE_FAT_LOG_START)
3393 != POST_STAGE_FAT_LOG_START &&
3394 (val & POST_STAGE_ARMFW_UE)
d2c2725c
SR
3395 != POST_STAGE_ARMFW_UE &&
3396 (val & POST_STAGE_RECOVERABLE_ERR)
3397 != POST_STAGE_RECOVERABLE_ERR)
673c96e5
SR
3398 return;
3399 }
3400
710f3e59 3401 dev_err(dev, "Error detected in the adapter\n");
673c96e5 3402 be_set_error(adapter, BE_ERROR_UE);
954f6825 3403
eb0eecc1
SK
3404 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3405 if (ue_lo & 1)
3406 dev_err(dev, "UE: %s bit set\n",
3407 ue_status_low_desc[i]);
3408 }
3409 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3410 if (ue_hi & 1)
3411 dev_err(dev, "UE: %s bit set\n",
3412 ue_status_hi_desc[i]);
3413 }
7c185276
AK
3414 }
3415 }
7c185276
AK
3416}
3417
8d56ff11
SP
3418static void be_msix_disable(struct be_adapter *adapter)
3419{
ac6a0c4a 3420 if (msix_enabled(adapter)) {
8d56ff11 3421 pci_disable_msix(adapter->pdev);
ac6a0c4a 3422 adapter->num_msix_vec = 0;
68d7bdcb 3423 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3424 }
3425}
3426
c2bba3df 3427static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3428{
6fde0e63 3429 unsigned int i, max_roce_eqs;
d379142b 3430 struct device *dev = &adapter->pdev->dev;
6fde0e63 3431 int num_vec;
6b7c5b94 3432
ce7faf0a
SP
3433 /* If RoCE is supported, program the max number of vectors that
3434 * could be used for NIC and RoCE; otherwise, program just the number
3435 * we'll use initially.
92bf14ab 3436 */
e261768e
SP
3437 if (be_roce_supported(adapter)) {
3438 max_roce_eqs =
3439 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3440 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3441 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3442 } else {
3443 num_vec = max(adapter->cfg_num_rx_irqs,
3444 adapter->cfg_num_tx_irqs);
3445 }
3abcdeda 3446
ac6a0c4a 3447 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3448 adapter->msix_entries[i].entry = i;
3449
7dc4c064
AG
3450 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3451 MIN_MSIX_VECTORS, num_vec);
3452 if (num_vec < 0)
3453 goto fail;
92bf14ab 3454
92bf14ab
SP
3455 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3456 adapter->num_msix_roce_vec = num_vec / 2;
3457 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3458 adapter->num_msix_roce_vec);
3459 }
3460
3461 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3462
3463 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3464 adapter->num_msix_vec);
c2bba3df 3465 return 0;
7dc4c064
AG
3466
3467fail:
3468 dev_warn(dev, "MSIx enable failed\n");
3469
3470 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3471 if (be_virtfn(adapter))
7dc4c064
AG
3472 return num_vec;
3473 return 0;
6b7c5b94
SP
3474}
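
/* Vector-count example (illustrative numbers): with RoCE supported,
 * be_max_func_eqs() == 16, be_max_nic_eqs() == 8 and 4 online CPUs,
 * max_roce_eqs = min(16 - 8, 4) = 4 and num_vec = be_max_any_irqs() + 4
 * is requested. pci_enable_msix_range() may grant anything from
 * MIN_MSIX_VECTORS up to that request; if more than MIN_MSIX_VECTORS
 * are granted, RoCE gets half of them (num_vec / 2) and the NIC keeps
 * the rest.
 */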
3475
fe6d2a38 3476static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3477 struct be_eq_obj *eqo)
b628bde2 3478{
f2f781a7 3479 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3480}
6b7c5b94 3481
b628bde2
SP
3482static int be_msix_register(struct be_adapter *adapter)
3483{
10ef9ab4
SP
3484 struct net_device *netdev = adapter->netdev;
3485 struct be_eq_obj *eqo;
3486 int status, i, vec;
6b7c5b94 3487
10ef9ab4 3488 for_all_evt_queues(adapter, eqo, i) {
5ef79151 3489 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
10ef9ab4 3490 vec = be_msix_vec_get(adapter, eqo);
5ef79151 3491 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3492 if (status)
3493 goto err_msix;
d658d98a 3494
b8b9dd52 3495 irq_update_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3496 }
b628bde2 3497
6b7c5b94 3498 return 0;
3abcdeda 3499err_msix:
6e3cd5fa
VD
3500 for (i--; i >= 0; i--) {
3501 eqo = &adapter->eq_obj[i];
10ef9ab4 3502 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3503 }
10ef9ab4 3504 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3505 status);
ac6a0c4a 3506 be_msix_disable(adapter);
6b7c5b94
SP
3507 return status;
3508}
3509
3510static int be_irq_register(struct be_adapter *adapter)
3511{
3512 struct net_device *netdev = adapter->netdev;
3513 int status;
3514
ac6a0c4a 3515 if (msix_enabled(adapter)) {
6b7c5b94
SP
3516 status = be_msix_register(adapter);
3517 if (status == 0)
3518 goto done;
ba343c77 3519 /* INTx is not supported for VF */
18c57c74 3520 if (be_virtfn(adapter))
ba343c77 3521 return status;
6b7c5b94
SP
3522 }
3523
e49cc34f 3524 /* INTx: only the first EQ is used */
6b7c5b94
SP
3525 netdev->irq = adapter->pdev->irq;
3526 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3527 &adapter->eq_obj[0]);
6b7c5b94
SP
3528 if (status) {
3529 dev_err(&adapter->pdev->dev,
3530 "INTx request IRQ failed - err %d\n", status);
3531 return status;
3532 }
3533done:
3534 adapter->isr_registered = true;
3535 return 0;
3536}
3537
3538static void be_irq_unregister(struct be_adapter *adapter)
3539{
3540 struct net_device *netdev = adapter->netdev;
10ef9ab4 3541 struct be_eq_obj *eqo;
d658d98a 3542 int i, vec;
6b7c5b94
SP
3543
3544 if (!adapter->isr_registered)
3545 return;
3546
3547 /* INTx */
ac6a0c4a 3548 if (!msix_enabled(adapter)) {
e49cc34f 3549 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3550 goto done;
3551 }
3552
3553 /* MSIx */
d658d98a
PR
3554 for_all_evt_queues(adapter, eqo, i) {
3555 vec = be_msix_vec_get(adapter, eqo);
b8b9dd52 3556 irq_update_affinity_hint(vec, NULL);
d658d98a
PR
3557 free_irq(vec, eqo);
3558 }
3abcdeda 3559
6b7c5b94
SP
3560done:
3561 adapter->isr_registered = false;
6b7c5b94
SP
3562}
3563
10ef9ab4 3564static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3565{
62219066 3566 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3567 struct be_queue_info *q;
3568 struct be_rx_obj *rxo;
3569 int i;
3570
3571 for_all_rx_queues(adapter, rxo, i) {
3572 q = &rxo->q;
3573 if (q->created) {
99b44304
KA
3574 /* If RXQs are destroyed while in an "out of buffer"
3575 * state, there is a possibility of an HW stall on
3576 * Lancer. So, post 64 buffers to each queue to relieve
3577 * the "out of buffer" condition.
3578 * Make sure there's space in the RXQ before posting.
3579 */
3580 if (lancer_chip(adapter)) {
3581 be_rx_cq_clean(rxo);
3582 if (atomic_read(&q->used) == 0)
3583 be_post_rx_frags(rxo, GFP_KERNEL,
3584 MAX_RX_POST);
3585 }
3586
482c9e79 3587 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3588 be_rx_cq_clean(rxo);
99b44304 3589 be_rxq_clean(rxo);
482c9e79 3590 }
10ef9ab4 3591 be_queue_free(adapter, q);
482c9e79 3592 }
62219066
AK
3593
3594 if (rss->rss_flags) {
3595 rss->rss_flags = RSS_ENABLE_NONE;
3596 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3597 128, rss->rss_hkey);
3598 }
482c9e79
SP
3599}
3600
bcc84140
KA
3601static void be_disable_if_filters(struct be_adapter *adapter)
3602{
6d928ae5
IV
3603 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3604 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
4993b39a 3605 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
6d928ae5 3606 be_dev_mac_del(adapter, adapter->pmac_id[0]);
4993b39a
IV
3607 eth_zero_addr(adapter->dev_mac);
3608 }
6d928ae5 3609
bcc84140 3610 be_clear_uc_list(adapter);
92fbb1df 3611 be_clear_mc_list(adapter);
bcc84140
KA
3612
3613 /* The IFACE flags are enabled in the open path and cleared
3614 * in the close path. When a VF gets detached from the host and
3615 * assigned to a VM the following happens:
3616 * - VF's IFACE flags get cleared in the detach path
3617 * - IFACE create is issued by the VF in the attach path
3618 * Due to a bug in the BE3/Skyhawk-R FW
3619 * (Lancer FW doesn't have the bug), the IFACE capability flags
3620 * specified along with the IFACE create cmd issued by a VF are not
3621 * honoured by FW. As a consequence, if a *new* driver
3622 * (that enables/disables IFACE flags in open/close)
3623 * is loaded in the host and an *old* driver is used by a VM/VF,
3624 * the IFACE gets created *without* the needed flags.
3625 * To avoid this, disable RX-filter flags only for Lancer.
3626 */
3627 if (lancer_chip(adapter)) {
3628 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3629 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3630 }
3631}
3632
889cd4b2
SP
3633static int be_close(struct net_device *netdev)
3634{
3635 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3636 struct be_eq_obj *eqo;
3637 int i;
889cd4b2 3638
e1ad8e33
KA
3639 /* This protection is needed as be_close() may be called even when the
3640 * adapter is in cleared state (after eeh perm failure)
3641 */
3642 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3643 return 0;
3644
b7172414
SP
3645 /* Before attempting cleanup ensure all the pending cmds in the
3646 * config_wq have finished execution
3647 */
3648 flush_workqueue(be_wq);
3649
bcc84140
KA
3650 be_disable_if_filters(adapter);
3651
dff345c5
IV
3652 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3653 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3654 napi_disable(&eqo->napi);
6384a4d0 3655 }
71237b6f 3656 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3657 }
a323d9bf
SP
3658
3659 be_async_mcc_disable(adapter);
3660
3661 /* Wait for all pending tx completions to arrive so that
3662 * all tx skbs are freed.
3663 */
fba87559 3664 netif_tx_disable(netdev);
6e1f9975 3665 be_tx_compl_clean(adapter);
a323d9bf
SP
3666
3667 be_rx_qs_destroy(adapter);
d11a347d 3668
a323d9bf 3669 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3670 if (msix_enabled(adapter))
3671 synchronize_irq(be_msix_vec_get(adapter, eqo));
3672 else
3673 synchronize_irq(netdev->irq);
3674 be_eq_clean(eqo);
63fcb27f
PR
3675 }
3676
889cd4b2
SP
3677 be_irq_unregister(adapter);
3678
482c9e79
SP
3679 return 0;
3680}
3681
10ef9ab4 3682static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3683{
1dcf7b1c
ED
3684 struct rss_info *rss = &adapter->rss_info;
3685 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3686 struct be_rx_obj *rxo;
e9008ee9 3687 int rc, i, j;
482c9e79
SP
3688
3689 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3690 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3691 sizeof(struct be_eth_rx_d));
3692 if (rc)
3693 return rc;
3694 }
3695
71bb8bd0
VV
3696 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3697 rxo = default_rxo(adapter);
3698 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3699 rx_frag_size, adapter->if_handle,
3700 false, &rxo->rss_id);
3701 if (rc)
3702 return rc;
3703 }
10ef9ab4
SP
3704
3705 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3706 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3707 rx_frag_size, adapter->if_handle,
3708 true, &rxo->rss_id);
482c9e79
SP
3709 if (rc)
3710 return rc;
3711 }
3712
3713 if (be_multi_rxq(adapter)) {
71bb8bd0 3714 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3715 for_all_rss_queues(adapter, rxo, i) {
e2557877 3716 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3717 break;
e2557877
VD
3718 rss->rsstable[j + i] = rxo->rss_id;
3719 rss->rss_queue[j + i] = i;
e9008ee9
PR
3720 }
3721 }
e2557877
VD
3722 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3723 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3724
3725 if (!BEx_chip(adapter))
e2557877
VD
3726 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3727 RSS_ENABLE_UDP_IPV6;
62219066
AK
3728
3729 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3730 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3731 RSS_INDIR_TABLE_LEN, rss_key);
3732 if (rc) {
3733 rss->rss_flags = RSS_ENABLE_NONE;
3734 return rc;
3735 }
3736
3737 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3738 } else {
3739 /* Disable RSS, if only default RX Q is created */
e2557877 3740 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3741 }
594ad54a 3742
e2557877 3743
b02e60c8
SR
3744 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3745 * which is a queue empty condition
3746 */
10ef9ab4 3747 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3748 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3749
889cd4b2
SP
3750 return 0;
3751}
3752
bcc84140
KA
3753static int be_enable_if_filters(struct be_adapter *adapter)
3754{
3755 int status;
3756
c1bb0a55 3757 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
bcc84140
KA
3758 if (status)
3759 return status;
3760
4993b39a
IV
3761 /* Normally this condition is true, as ->dev_mac is zeroed.
3762 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3763 * subsequent be_dev_mac_add() can fail (after fresh boot)
3764 */
3765 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3766 int old_pmac_id = -1;
3767
3768 /* Remember old programmed MAC if any - can happen on BE3 VF */
3769 if (!is_zero_ether_addr(adapter->dev_mac))
3770 old_pmac_id = adapter->pmac_id[0];
3771
988d44b1 3772 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
bcc84140
KA
3773 if (status)
3774 return status;
4993b39a
IV
3775
3776 /* Delete the old programmed MAC as we successfully programmed
3777 * a new MAC
3778 */
3779 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3780 be_dev_mac_del(adapter, old_pmac_id);
3781
c27ebf58 3782 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
bcc84140
KA
3783 }
3784
3785 if (adapter->vlans_added)
3786 be_vid_config(adapter);
3787
b7172414 3788 __be_set_rx_mode(adapter);
bcc84140
KA
3789
3790 return 0;
3791}
3792
6b7c5b94
SP
3793static int be_open(struct net_device *netdev)
3794{
3795 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3796 struct be_eq_obj *eqo;
3abcdeda 3797 struct be_rx_obj *rxo;
10ef9ab4 3798 struct be_tx_obj *txo;
b236916a 3799 u8 link_status;
3abcdeda 3800 int status, i;
5fb379ee 3801
10ef9ab4 3802 status = be_rx_qs_create(adapter);
482c9e79
SP
3803 if (status)
3804 goto err;
3805
bcc84140
KA
3806 status = be_enable_if_filters(adapter);
3807 if (status)
3808 goto err;
3809
c2bba3df
SK
3810 status = be_irq_register(adapter);
3811 if (status)
3812 goto err;
5fb379ee 3813
10ef9ab4 3814 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3815 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3816
10ef9ab4
SP
3817 for_all_tx_queues(adapter, txo, i)
3818 be_cq_notify(adapter, txo->cq.id, true, 0);
3819
7a1e9b20
SP
3820 be_async_mcc_enable(adapter);
3821
10ef9ab4
SP
3822 for_all_evt_queues(adapter, eqo, i) {
3823 napi_enable(&eqo->napi);
20947770 3824 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3825 }
04d3d624 3826 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3827
323ff71e 3828 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3829 if (!status)
3830 be_link_status_update(adapter, link_status);
3831
fba87559 3832 netif_tx_start_all_queues(netdev);
8f0545d2
JK
3833
3834 udp_tunnel_nic_reset_ntf(netdev);
c5abe7c0 3835
889cd4b2
SP
3836 return 0;
3837err:
3838 be_close(adapter->netdev);
3839 return -EIO;
5fb379ee
SP
3840}
3841
f7062ee5
SP
3842static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3843{
3844 u32 addr;
3845
3846 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3847
3848 mac[5] = (u8)(addr & 0xFF);
3849 mac[4] = (u8)((addr >> 8) & 0xFF);
3850 mac[3] = (u8)((addr >> 16) & 0xFF);
3851 /* Use the OUI from the current MAC address */
3852 memcpy(mac, adapter->netdev->dev_addr, 3);
3853}
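
/* Example (illustrative MAC): for a PF MAC of 00:11:22:33:44:55 the
 * OUI 00:11:22 is kept and the low three bytes come from jhash() of the
 * PF MAC; be_vf_eth_addr_config() below then bumps mac[5] once per VF,
 * so VFs receive consecutive addresses under the PF's OUI.
 */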
3854
6d87f5c3
AK
3855/*
3856 * Generate a seed MAC address from the PF MAC Address using jhash.
3857 * MAC addresses for VFs are assigned incrementally starting from the seed.
3858 * These addresses are programmed in the ASIC by the PF and the VF driver
3859 * queries for the MAC address during its probe.
3860 */
4c876616 3861static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3862{
f9449ab7 3863 u32 vf;
3abcdeda 3864 int status = 0;
6d87f5c3 3865 u8 mac[ETH_ALEN];
11ac75ed 3866 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3867
3868 be_vf_eth_addr_generate(adapter, mac);
3869
11ac75ed 3870 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3871 if (BEx_chip(adapter))
590c391d 3872 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3873 vf_cfg->if_handle,
3874 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3875 else
3876 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3877 vf + 1);
590c391d 3878
6d87f5c3
AK
3879 if (status)
3880 dev_err(&adapter->pdev->dev,
748b539a
SP
3881 "MAC address assignment failed for VF %d\n",
3882 vf);
6d87f5c3 3883 else
11ac75ed 3884 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3885
3886 mac[5] += 1;
3887 }
3888 return status;
3889}
3890
4c876616
SP
3891static int be_vfs_mac_query(struct be_adapter *adapter)
3892{
3893 int status, vf;
3894 u8 mac[ETH_ALEN];
3895 struct be_vf_cfg *vf_cfg;
4c876616
SP
3896
3897 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3898 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3899 mac, vf_cfg->if_handle,
3900 false, vf+1);
4c876616
SP
3901 if (status)
3902 return status;
3903 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3904 }
3905 return 0;
3906}
3907
f9449ab7 3908static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3909{
11ac75ed 3910 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3911 u32 vf;
3912
257a3feb 3913 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3914 dev_warn(&adapter->pdev->dev,
3915 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3916 goto done;
3917 }
3918
b4c1df93
SP
3919 pci_disable_sriov(adapter->pdev);
3920
11ac75ed 3921 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3922 if (BEx_chip(adapter))
11ac75ed
SP
3923 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3924 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3925 else
3926 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3927 vf + 1);
f9449ab7 3928
11ac75ed
SP
3929 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3930 }
884476be
SK
3931
3932 if (BE3_chip(adapter))
3933 be_cmd_set_hsw_config(adapter, 0, 0,
3934 adapter->if_handle,
3935 PORT_FWD_TYPE_PASSTHRU, 0);
39f1d94d
SP
3936done:
3937 kfree(adapter->vf_cfg);
3938 adapter->num_vfs = 0;
f174c7ec 3939 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3940}
3941
7707133c
SP
3942static void be_clear_queues(struct be_adapter *adapter)
3943{
3944 be_mcc_queues_destroy(adapter);
3945 be_rx_cqs_destroy(adapter);
3946 be_tx_queues_destroy(adapter);
3947 be_evt_queues_destroy(adapter);
3948}
3949
68d7bdcb 3950static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3951{
191eb756
SP
3952 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3953 cancel_delayed_work_sync(&adapter->work);
3954 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3955 }
68d7bdcb
SP
3956}
3957
eb7dd46c
SP
3958static void be_cancel_err_detection(struct be_adapter *adapter)
3959{
710f3e59
SB
3960 struct be_error_recovery *err_rec = &adapter->error_recovery;
3961
3962 if (!be_err_recovery_workq)
3963 return;
3964
eb7dd46c 3965 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
710f3e59 3966 cancel_delayed_work_sync(&err_rec->err_detection_work);
eb7dd46c
SP
3967 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3968 }
3969}
3970
8f0545d2
JK
3971/* VxLAN offload Notes:
3972 *
3973 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
3974 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
3975 * is expected to work across all types of IP tunnels once exported. Skyhawk
3976 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
3977 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
3978 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
3979 * those other tunnels are unexported on the fly through ndo_features_check().
3980 */
3981static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
3982 unsigned int entry, struct udp_tunnel_info *ti)
bf8d9dfb 3983{
8f0545d2 3984 struct be_adapter *adapter = netdev_priv(netdev);
bf8d9dfb 3985 struct device *dev = &adapter->pdev->dev;
bf8d9dfb
SB
3986 int status;
3987
bf8d9dfb
SB
3988 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3989 OP_CONVERT_NORMAL_TO_TUNNEL);
3990 if (status) {
3991 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3992 return status;
3993 }
3994 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3995
8f0545d2 3996 status = be_cmd_set_vxlan_port(adapter, ti->port);
bf8d9dfb
SB
3997 if (status) {
3998 dev_warn(dev, "Failed to add VxLAN port\n");
3999 return status;
4000 }
8f0545d2 4001 adapter->vxlan_port = ti->port;
bf8d9dfb
SB
4002
4003 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4004 NETIF_F_TSO | NETIF_F_TSO6 |
4005 NETIF_F_GSO_UDP_TUNNEL;
bf8d9dfb
SB
4006
4007 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
8f0545d2 4008 be16_to_cpu(ti->port));
bf8d9dfb
SB
4009 return 0;
4010}
4011
8f0545d2
JK
4012static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4013 unsigned int entry, struct udp_tunnel_info *ti)
c9c47142 4014{
8f0545d2 4015 struct be_adapter *adapter = netdev_priv(netdev);
630f4b70 4016
c9c47142
SP
4017 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4018 be_cmd_manage_iface(adapter, adapter->if_handle,
4019 OP_CONVERT_TUNNEL_TO_NORMAL);
4020
4021 if (adapter->vxlan_port)
4022 be_cmd_set_vxlan_port(adapter, 0);
4023
4024 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4025 adapter->vxlan_port = 0;
630f4b70
SB
4026
4027 netdev->hw_enc_features = 0;
8f0545d2 4028 return 0;
c9c47142
SP
4029}
4030
8f0545d2
JK
4031static const struct udp_tunnel_nic_info be_udp_tunnels = {
4032 .set_port = be_vxlan_set_port,
4033 .unset_port = be_vxlan_unset_port,
4034 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
4035 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
4036 .tables = {
4037 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
4038 },
4039};
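
/* This table is wired up via netdev->udp_tunnel_nic_info during netdev
 * init elsewhere in this file; the core then invokes set_port/unset_port
 * as VxLAN sockets appear and disappear. OPEN_ONLY defers those
 * callbacks until the device is up, matching the
 * udp_tunnel_nic_reset_ntf() call in be_open() above.
 */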
4040
b9263cbf
SR
4041static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4042 struct be_resources *vft_res)
f2858738
VV
4043{
4044 struct be_resources res = adapter->pool_res;
b9263cbf
SR
4045 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4046 struct be_resources res_mod = {0};
f2858738
VV
4047 u16 num_vf_qs = 1;
4048
de2b1e03
SK
4049 /* Distribute the queue resources among the PF and its VFs */
4050 if (num_vfs) {
4051 /* Divide the rx queues evenly among the VFs and the PF, capped
4052 * at VF-EQ-count. Any remainder queues belong to the PF.
4053 */
ee9ad280
SB
4054 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4055 res.max_rss_qs / (num_vfs + 1));
f2858738 4056
de2b1e03
SK
4057 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4058 * RSS Tables per port. Provide RSS on VFs only if the number of
4059 * VFs requested is less than its PF Pool's RSS Tables limit.
f2858738 4060 */
de2b1e03 4061 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
f2858738
VV
4062 num_vf_qs = 1;
4063 }
b9263cbf
SR
4064
4065 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4066 * which are modifiable using SET_PROFILE_CONFIG cmd.
4067 */
de2b1e03
SK
4068 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4069 RESOURCE_MODIFIABLE, 0);
b9263cbf
SR
4070
4071 /* If RSS IFACE capability flags are modifiable for a VF, set the
4072 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4073 * more than 1 RSSQ is available for a VF.
4074 * Otherwise, provision only 1 queue pair for VF.
4075 */
4076 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4077 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4078 if (num_vf_qs > 1) {
4079 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4080 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4081 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4082 } else {
4083 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4084 BE_IF_FLAGS_DEFQ_RSS);
4085 }
4086 } else {
4087 num_vf_qs = 1;
4088 }
4089
4090 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4091 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4092 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4093 }
4094
4095 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4096 vft_res->max_rx_qs = num_vf_qs;
4097 vft_res->max_rss_qs = num_vf_qs;
4098 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4099 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4100
4101 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4102 * among the PF and its VFs, if the fields are changeable
4103 */
4104 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4105 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4106
4107 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4108 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4109
4110 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4111 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4112
4113 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4114 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
f2858738
VV
4115}
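
/* Split example (illustrative numbers, assuming SH_VF_MAX_NIC_EQS >= 2):
 * for num_vfs = 7 and res.max_rss_qs = 16, num_vf_qs =
 * min(SH_VF_MAX_NIC_EQS, 16 / (7 + 1)) = 2; similarly a max_tx_qs of 16
 * leaves 16 / 8 = 2 TX queues for each of the PF and its 7 VFs.
 */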
4116
b7172414
SP
4117static void be_if_destroy(struct be_adapter *adapter)
4118{
4119 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4120
4121 kfree(adapter->pmac_id);
4122 adapter->pmac_id = NULL;
4123
4124 kfree(adapter->mc_list);
4125 adapter->mc_list = NULL;
4126
4127 kfree(adapter->uc_list);
4128 adapter->uc_list = NULL;
4129}
4130
b05004ad
SK
4131static int be_clear(struct be_adapter *adapter)
4132{
f2858738 4133 struct pci_dev *pdev = adapter->pdev;
b9263cbf 4134 struct be_resources vft_res = {0};
f2858738 4135
68d7bdcb 4136 be_cancel_worker(adapter);
191eb756 4137
b7172414
SP
4138 flush_workqueue(be_wq);
4139
11ac75ed 4140 if (sriov_enabled(adapter))
f9449ab7
SP
4141 be_vf_clear(adapter);
4142
bec84e6b
VV
4143 /* Re-configure FW to distribute resources evenly across max-supported
4144 * number of VFs, only when VFs are not already enabled.
4145 */
ace40aff
VV
4146 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4147 !pci_vfs_assigned(pdev)) {
b9263cbf
SR
4148 be_calculate_vf_res(adapter,
4149 pci_sriov_get_totalvfs(pdev),
4150 &vft_res);
bec84e6b 4151 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738 4152 pci_sriov_get_totalvfs(pdev),
b9263cbf 4153 &vft_res);
f2858738 4154 }
bec84e6b 4155
8f0545d2 4156 be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
fbc13f01 4157
b7172414 4158 be_if_destroy(adapter);
a54769f5 4159
7707133c 4160 be_clear_queues(adapter);
a54769f5 4161
10ef9ab4 4162 be_msix_disable(adapter);
e1ad8e33 4163 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
4164 return 0;
4165}
4166
4c876616 4167static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 4168{
92bf14ab 4169 struct be_resources res = {0};
bcc84140 4170 u32 cap_flags, en_flags, vf;
4c876616 4171 struct be_vf_cfg *vf_cfg;
0700d816 4172 int status;
abb93951 4173
0700d816 4174 /* If a FW profile exists, then cap_flags are updated */
c1bb0a55 4175 cap_flags = BE_VF_IF_EN_FLAGS;
abb93951 4176
4c876616 4177 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab 4178 if (!BE3_chip(adapter)) {
de2b1e03
SK
4179 status = be_cmd_get_profile_config(adapter, &res, NULL,
4180 ACTIVE_PROFILE_TYPE,
f2858738 4181 RESOURCE_LIMITS,
92bf14ab 4182 vf + 1);
435452aa 4183 if (!status) {
92bf14ab 4184 cap_flags = res.if_cap_flags;
435452aa
VV
4185 /* Prevent VFs from enabling VLAN promiscuous
4186 * mode
4187 */
4188 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4189 }
92bf14ab 4190 }
4c876616 4191
c1bb0a55
VD
4192 /* PF should enable IF flags during proxy if_create call */
4193 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
bcc84140
KA
4194 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4195 &vf_cfg->if_handle, vf + 1);
4c876616 4196 if (status)
0700d816 4197 return status;
4c876616 4198 }
0700d816
KA
4199
4200 return 0;
abb93951
PR
4201}
4202
39f1d94d 4203static int be_vf_setup_init(struct be_adapter *adapter)
30128031 4204{
11ac75ed 4205 struct be_vf_cfg *vf_cfg;
30128031
SP
4206 int vf;
4207
39f1d94d
SP
4208 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4209 GFP_KERNEL);
4210 if (!adapter->vf_cfg)
4211 return -ENOMEM;
4212
11ac75ed
SP
4213 for_all_vfs(adapter, vf_cfg, vf) {
4214 vf_cfg->if_handle = -1;
4215 vf_cfg->pmac_id = -1;
30128031 4216 }
39f1d94d 4217 return 0;
30128031
SP
4218}
4219
f9449ab7
SP
4220static int be_vf_setup(struct be_adapter *adapter)
4221{
c502224e 4222 struct device *dev = &adapter->pdev->dev;
11ac75ed 4223 struct be_vf_cfg *vf_cfg;
4c876616 4224 int status, old_vfs, vf;
e7bcbd7b 4225 bool spoofchk;
39f1d94d 4226
257a3feb 4227 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
4228
4229 status = be_vf_setup_init(adapter);
4230 if (status)
4231 goto err;
30128031 4232
4c876616
SP
4233 if (old_vfs) {
4234 for_all_vfs(adapter, vf_cfg, vf) {
4235 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4236 if (status)
4237 goto err;
4238 }
f9449ab7 4239
4c876616
SP
4240 status = be_vfs_mac_query(adapter);
4241 if (status)
4242 goto err;
4243 } else {
bec84e6b
VV
4244 status = be_vfs_if_create(adapter);
4245 if (status)
4246 goto err;
4247
39f1d94d
SP
4248 status = be_vf_eth_addr_config(adapter);
4249 if (status)
4250 goto err;
4251 }
f9449ab7 4252
11ac75ed 4253 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 4254 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
4255 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4256 vf + 1);
4257 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 4258 status = be_cmd_set_fn_privileges(adapter,
435452aa 4259 vf_cfg->privileges |
04a06028
SP
4260 BE_PRIV_FILTMGMT,
4261 vf + 1);
435452aa
VV
4262 if (!status) {
4263 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
4264 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4265 vf);
435452aa 4266 }
04a06028
SP
4267 }
4268
0f77ba73
RN
4269 /* Allow full available bandwidth */
4270 if (!old_vfs)
4271 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 4272
e7bcbd7b
KA
4273 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4274 vf_cfg->if_handle, NULL,
4275 &spoofchk);
4276 if (!status)
4277 vf_cfg->spoofchk = spoofchk;
4278
bdce2ad7 4279 if (!old_vfs) {
0599863d 4280 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
4281 be_cmd_set_logical_link_config(adapter,
4282 IFLA_VF_LINK_STATE_AUTO,
4283 vf+1);
4284 }
f9449ab7 4285 }
b4c1df93
SP
4286
4287 if (!old_vfs) {
4288 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4289 if (status) {
4290 dev_err(dev, "SRIOV enable failed\n");
4291 adapter->num_vfs = 0;
4292 goto err;
4293 }
4294 }
f174c7ec 4295
884476be
SK
4296 if (BE3_chip(adapter)) {
4297 /* On BE3, enable VEB only when SRIOV is enabled */
4298 status = be_cmd_set_hsw_config(adapter, 0, 0,
4299 adapter->if_handle,
4300 PORT_FWD_TYPE_VEB, 0);
4301 if (status)
4302 goto err;
4303 }
4304
f174c7ec 4305 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
4306 return 0;
4307err:
4c876616
SP
4308 dev_err(dev, "VF setup failed\n");
4309 be_vf_clear(adapter);
f9449ab7
SP
4310 return status;
4311}
4312
f93f160b
VV
4313/* Converting function_mode bits on BE3 to SH mc_type enums */
4314
4315static u8 be_convert_mc_type(u32 function_mode)
4316{
66064dbc 4317 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4318 return vNIC1;
66064dbc 4319 else if (function_mode & QNQ_MODE)
f93f160b
VV
4320 return FLEX10;
4321 else if (function_mode & VNIC_MODE)
4322 return vNIC2;
4323 else if (function_mode & UMC_ENABLED)
4324 return UMC;
4325 else
4326 return MC_NONE;
4327}
4328
92bf14ab
SP
4329/* On BE2/BE3 FW does not suggest the supported limits */
4330static void BEx_get_resources(struct be_adapter *adapter,
4331 struct be_resources *res)
4332{
bec84e6b 4333 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4334
4335 if (be_physfn(adapter))
4336 res->max_uc_mac = BE_UC_PMAC_COUNT;
4337 else
4338 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4339
f93f160b
VV
4340 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4341
4342 if (be_is_mc(adapter)) {
4343 /* Assuming that there are 4 channels per port
4344 * when multi-channel is enabled
4345 */
4346 if (be_is_qnq_mode(adapter))
4347 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4348 else
4349 /* In a non-qnq multichannel mode, the pvid
4350 * takes up one vlan entry
4351 */
4352 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4353 } else {
92bf14ab 4354 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4355 }
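
	/* VLAN-filter example (illustrative, assuming
	 * BE_NUM_VLANS_SUPPORTED == 64): a qnq multi-channel function gets
	 * 64 / 8 = 8 entries, a non-qnq multi-channel one gets
	 * (64 / 4) - 1 = 15 (the pvid burns one entry), and a
	 * single-channel function keeps all 64.
	 */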
4356
92bf14ab
SP
4357 res->max_mcast_mac = BE_MAX_MC;
4358
a5243dab
VV
4359 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4360 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4361 * *only* if it is RSS-capable.
4362 */
4363 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4364 be_virtfn(adapter) ||
4365 (be_is_mc(adapter) &&
4366 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4367 res->max_tx_qs = 1;
a28277dc
SR
4368 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4369 struct be_resources super_nic_res = {0};
4370
4371 /* On a SuperNIC profile, the driver needs to use the
4372 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4373 */
de2b1e03
SK
4374 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4375 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4376 0);
a28277dc
SR
4377 /* Some old versions of BE3 FW don't report max_tx_qs value */
4378 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4379 } else {
92bf14ab 4380 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4381 }
92bf14ab
SP
4382
4383 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4384 !use_sriov && be_physfn(adapter))
4385 res->max_rss_qs = (adapter->be3_native) ?
4386 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4387 res->max_rx_qs = res->max_rss_qs + 1;
4388
e3dc867c 4389 if (be_physfn(adapter))
d3518e21 4390 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4391 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4392 else
4393 res->max_evt_qs = 1;
92bf14ab
SP
4394
4395 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4396 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4397 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4398 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4399}
4400
30128031
SP
4401static void be_setup_init(struct be_adapter *adapter)
4402{
4403 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4404 adapter->phy.link_speed = -1;
30128031
SP
4405 adapter->if_handle = -1;
4406 adapter->be3_native = false;
f66b7cfd 4407 adapter->if_flags = 0;
51d1f98a 4408 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4409 if (be_physfn(adapter))
4410 adapter->cmd_privileges = MAX_PRIVILEGES;
4411 else
4412 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4413}
4414
de2b1e03
SK
4415/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4416 * However, this HW limitation is not exposed to the host via any SLI cmd.
4417 * As a result, in the case of SRIOV and in particular multi-partition configs
4418 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4419 * for distribution between the VFs. This self-imposed limit will determine the
4420 * number of VFs for which RSS can be enabled.
4421 */
d766e7e6 4422static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
de2b1e03
SK
4423{
4424 struct be_port_resources port_res = {0};
4425 u8 rss_tables_on_port;
4426 u16 max_vfs = be_max_vfs(adapter);
4427
4428 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4429 RESOURCE_LIMITS, 0);
4430
4431 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4432
4433 /* Each PF Pool's RSS Tables limit =
4434 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4435 */
4436 adapter->pool_res.max_rss_tables =
4437 max_vfs * rss_tables_on_port / port_res.max_vfs;
4438}
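
/* Worked example (illustrative numbers): with 15 RSS tables on the port
 * and 1 NIC PF, rss_tables_on_port = 15 - 1 = 14; a PF with max_vfs = 32
 * out of port_res.max_vfs = 64 then gets 32 * 14 / 64 = 7 RSS tables
 * for its VF pool.
 */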
4439
bec84e6b
VV
4440static int be_get_sriov_config(struct be_adapter *adapter)
4441{
bec84e6b 4442 struct be_resources res = {0};
d3d18312 4443 int max_vfs, old_vfs;
bec84e6b 4444
de2b1e03
SK
4445 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4446 RESOURCE_LIMITS, 0);
d3d18312 4447
ace40aff 4448 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4449 if (BE3_chip(adapter) && !res.max_vfs) {
4450 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4451 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4452 }
4453
d3d18312 4454 adapter->pool_res = res;
bec84e6b 4455
ace40aff
VV
4456 /* If during previous unload of the driver, the VFs were not disabled,
4457 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4458 * Instead use the TotalVFs value stored in the pci-dev struct.
4459 */
bec84e6b
VV
4460 old_vfs = pci_num_vf(adapter->pdev);
4461 if (old_vfs) {
ace40aff
VV
4462 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4463 old_vfs);
4464
4465 adapter->pool_res.max_vfs =
4466 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4467 adapter->num_vfs = old_vfs;
bec84e6b
VV
4468 }
4469
de2b1e03
SK
4470 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4471 be_calculate_pf_pool_rss_tables(adapter);
4472 dev_info(&adapter->pdev->dev,
4473 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4474 be_max_pf_pool_rss_tables(adapter));
4475 }
bec84e6b
VV
4476 return 0;
4477}
4478
ace40aff
VV
4479static void be_alloc_sriov_res(struct be_adapter *adapter)
4480{
4481 int old_vfs = pci_num_vf(adapter->pdev);
b9263cbf 4482 struct be_resources vft_res = {0};
ace40aff
VV
4483 int status;
4484
4485 be_get_sriov_config(adapter);
4486
4487 if (!old_vfs)
4488 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4489
4490 /* When the HW is in SRIOV capable configuration, the PF-pool
4491 * resources are given to PF during driver load, if there are no
4492 * old VFs. This facility is not available in BE3 FW.
4493 * Also, this is done by FW in Lancer chip.
4494 */
4495 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
b9263cbf 4496 be_calculate_vf_res(adapter, 0, &vft_res);
ace40aff 4497 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
b9263cbf 4498 &vft_res);
ace40aff
VV
4499 if (status)
4500 dev_err(&adapter->pdev->dev,
4501 "Failed to optimize SRIOV resources\n");
4502 }
4503}
4504
92bf14ab 4505static int be_get_resources(struct be_adapter *adapter)
abb93951 4506{
92bf14ab
SP
4507 struct device *dev = &adapter->pdev->dev;
4508 struct be_resources res = {0};
4509 int status;
abb93951 4510
92bf14ab
SP
4511 /* For Lancer, SH etc. read per-function resource limits from FW.
4512 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4513 * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
4514 */
ce7faf0a
SP
4515 if (BEx_chip(adapter)) {
4516 BEx_get_resources(adapter, &res);
4517 } else {
92bf14ab
SP
4518 status = be_cmd_get_func_config(adapter, &res);
4519 if (status)
4520 return status;
abb93951 4521
71bb8bd0
VV
4522 /* If a default RXQ must be created, we'll use up one RSSQ */
4523 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4524 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4525 res.max_rss_qs -= 1;
abb93951 4526 }
4c876616 4527
ce7faf0a
SP
4528 /* If RoCE is supported stash away half the EQs for RoCE */
4529 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4530 res.max_evt_qs / 2 : res.max_evt_qs;
4531 adapter->res = res;
4532
71bb8bd0
VV
4533 /* If FW supports RSS default queue, then skip creating non-RSS
4534 * queue for non-IP traffic.
4535 */
4536 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4537 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4538
acbafeb1
SP
4539 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4540 be_max_txqs(adapter), be_max_rxqs(adapter),
ce7faf0a 4541 be_max_rss(adapter), be_max_nic_eqs(adapter),
acbafeb1
SP
4542 be_max_vfs(adapter));
4543 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4544 be_max_uc(adapter), be_max_mc(adapter),
4545 be_max_vlans(adapter));
4546
e261768e
SP
4547 /* Ensure RX and TX queues are created in pairs at init time */
4548 adapter->cfg_num_rx_irqs =
4549 min_t(u16, netif_get_num_default_rss_queues(),
4550 be_max_qp_irqs(adapter));
4551 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
92bf14ab 4552 return 0;
abb93951
PR
4553}
4554
39f1d94d
SP
4555static int be_get_config(struct be_adapter *adapter)
4556{
6b085ba9 4557 int status, level;
542963b7 4558 u16 profile_id;
6b085ba9 4559
980df249
SR
4560 status = be_cmd_get_cntl_attributes(adapter);
4561 if (status)
4562 return status;
4563
e97e3cda 4564 status = be_cmd_query_fw_cfg(adapter);
abb93951 4565 if (status)
92bf14ab 4566 return status;
abb93951 4567
fd7ff6f0
VD
4568 if (!lancer_chip(adapter) && be_physfn(adapter))
4569 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4570
6b085ba9
SP
4571 if (BEx_chip(adapter)) {
4572 level = be_cmd_get_fw_log_level(adapter);
4573 adapter->msg_enable =
4574 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4575 }
4576
4577 be_cmd_get_acpi_wol_cap(adapter);
45f13df7
SB
4578 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4579 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
6b085ba9 4580
21252377
VV
4581 be_cmd_query_port_name(adapter);
4582
4583 if (be_physfn(adapter)) {
542963b7
VV
4584 status = be_cmd_get_active_profile(adapter, &profile_id);
4585 if (!status)
4586 dev_info(&adapter->pdev->dev,
4587 "Using profile 0x%x\n", profile_id);
962bcb75 4588 }
bec84e6b 4589
92bf14ab 4590 return 0;
39f1d94d
SP
4591}
4592
95046b92
SP
4593static int be_mac_setup(struct be_adapter *adapter)
4594{
4595 u8 mac[ETH_ALEN];
4596 int status;
4597
4598 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4599 status = be_cmd_get_perm_mac(adapter, mac);
4600 if (status)
4601 return status;
4602
a96d317f 4603 eth_hw_addr_set(adapter->netdev, mac);
95046b92 4604 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4993b39a
IV
4605
4606 /* Initial MAC for BE3 VFs is already programmed by PF */
4607 if (BEx_chip(adapter) && be_virtfn(adapter))
4608 memcpy(adapter->dev_mac, mac, ETH_ALEN);
95046b92
SP
4609 }
4610
95046b92
SP
4611 return 0;
4612}
4613
68d7bdcb
SP
4614static void be_schedule_worker(struct be_adapter *adapter)
4615{
b7172414 4616 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
68d7bdcb
SP
4617 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4618}
4619
710f3e59
SB
4620static void be_destroy_err_recovery_workq(void)
4621{
4622 if (!be_err_recovery_workq)
4623 return;
4624
710f3e59
SB
4625 destroy_workqueue(be_err_recovery_workq);
4626 be_err_recovery_workq = NULL;
4627}
4628
972f37b4 4629static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c 4630{
710f3e59
SB
4631 struct be_error_recovery *err_rec = &adapter->error_recovery;
4632
4633 if (!be_err_recovery_workq)
4634 return;
4635
4636 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4637 msecs_to_jiffies(delay));
eb7dd46c
SP
4638 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4639}
4640
7707133c 4641static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4642{
68d7bdcb 4643 struct net_device *netdev = adapter->netdev;
10ef9ab4 4644 int status;
ba343c77 4645
7707133c 4646 status = be_evt_queues_create(adapter);
abb93951
PR
4647 if (status)
4648 goto err;
73d540f2 4649
7707133c 4650 status = be_tx_qs_create(adapter);
c2bba3df
SK
4651 if (status)
4652 goto err;
10ef9ab4 4653
7707133c 4654 status = be_rx_cqs_create(adapter);
10ef9ab4 4655 if (status)
a54769f5 4656 goto err;
6b7c5b94 4657
7707133c 4658 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4659 if (status)
4660 goto err;
4661
68d7bdcb
SP
4662 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4663 if (status)
4664 goto err;
4665
4666 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4667 if (status)
4668 goto err;
4669
7707133c
SP
4670 return 0;
4671err:
4672 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4673 return status;
4674}
4675
62219066
AK
4676static int be_if_create(struct be_adapter *adapter)
4677{
4678 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4679 u32 cap_flags = be_if_cap_flags(adapter);
62219066 4680
b7172414
SP
4681 /* alloc required memory for other filtering fields */
4682 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4683 sizeof(*adapter->pmac_id), GFP_KERNEL);
4684 if (!adapter->pmac_id)
4685 return -ENOMEM;
4686
4687 adapter->mc_list = kcalloc(be_max_mc(adapter),
4688 sizeof(*adapter->mc_list), GFP_KERNEL);
4689 if (!adapter->mc_list)
4690 return -ENOMEM;
4691
4692 adapter->uc_list = kcalloc(be_max_uc(adapter),
4693 sizeof(*adapter->uc_list), GFP_KERNEL);
4694 if (!adapter->uc_list)
4695 return -ENOMEM;
4696
e261768e 4697 if (adapter->cfg_num_rx_irqs == 1)
62219066
AK
4698 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4699
4700 en_flags &= cap_flags;
4701 /* will enable all the needed filter flags in be_open() */
dd0e7aab 4702 return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
62219066 4703 &adapter->if_handle, 0);
62219066
AK
4704}
4705
68d7bdcb
SP
4706int be_update_queues(struct be_adapter *adapter)
4707{
4708 struct net_device *netdev = adapter->netdev;
4709 int status;
4710
7429c6c0 4711 if (netif_running(netdev)) {
ffd342e0
BP
4712 /* be_tx_timeout() must not run concurrently with this
4713 * function, synchronize with an already-running dev_watchdog
4714 */
4715 netif_tx_lock_bh(netdev);
7429c6c0
BP
4716 /* device cannot transmit now, avoid dev_watchdog timeouts */
4717 netif_carrier_off(netdev);
ffd342e0 4718 netif_tx_unlock_bh(netdev);
7429c6c0 4719
68d7bdcb 4720 be_close(netdev);
7429c6c0 4721 }
68d7bdcb
SP
4722
4723 be_cancel_worker(adapter);
4724
4725 /* If any vectors have been shared with RoCE we cannot re-program
4726 * the MSIx table.
4727 */
4728 if (!adapter->num_msix_roce_vec)
4729 be_msix_disable(adapter);
4730
4731 be_clear_queues(adapter);
62219066
AK
4732 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4733 if (status)
4734 return status;
68d7bdcb
SP
4735
4736 if (!msix_enabled(adapter)) {
4737 status = be_msix_enable(adapter);
4738 if (status)
4739 return status;
4740 }
4741
62219066
AK
4742 status = be_if_create(adapter);
4743 if (status)
4744 return status;
4745
68d7bdcb
SP
4746 status = be_setup_queues(adapter);
4747 if (status)
4748 return status;
4749
4750 be_schedule_worker(adapter);
4751
5f834cf4 4752 /* The IF was destroyed and re-created. We need to clear
52acf064
IV
4753 * all promiscuous flags valid for the destroyed IF.
4754 * Without this, promisc mode is not restored during
4755 * be_open() because the driver thinks that it is
4756 * already enabled in HW.
4757 */
4758 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4759
68d7bdcb
SP
4760 if (netif_running(netdev))
4761 status = be_open(netdev);
4762
4763 return status;
4764}
4765
f7062ee5
SP
4766static inline int fw_major_num(const char *fw_ver)
4767{
4768 int fw_major = 0, i;
4769
4770 i = sscanf(fw_ver, "%d.", &fw_major);
4771 if (i != 1)
4772 return 0;
4773
4774 return fw_major;
4775}
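
/* e.g. fw_major_num("4.6.62.0") returns 4, while an unparsable version
 * string (sscanf match count != 1) returns 0, which callers such as the
 * BE2 check in be_setup() treat as unknown/old firmware.
 */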
4776
710f3e59
SB
4777/* If it is error recovery, FLR the PF
4778 * Else if any VFs are already enabled don't FLR the PF
4779 */
f962f840
SP
4780static bool be_reset_required(struct be_adapter *adapter)
4781{
710f3e59
SB
4782 if (be_error_recovering(adapter))
4783 return true;
4784 else
4785 return pci_num_vf(adapter->pdev) == 0;
f962f840
SP
4786}
4787
4788/* Wait for the FW to be ready and perform the required initialization */
4789static int be_func_init(struct be_adapter *adapter)
4790{
4791 int status;
4792
4793 status = be_fw_wait_ready(adapter);
4794 if (status)
4795 return status;
4796
710f3e59
SB
4797 /* FW is now ready; clear errors to allow cmds/doorbell */
4798 be_clear_error(adapter, BE_CLEAR_ALL);
4799
f962f840
SP
4800 if (be_reset_required(adapter)) {
4801 status = be_cmd_reset_function(adapter);
4802 if (status)
4803 return status;
4804
4805 /* Wait for interrupts to quiesce after an FLR */
4806 msleep(100);
f962f840
SP
4807 }
4808
4809 /* Tell FW we're ready to fire cmds */
4810 status = be_cmd_fw_init(adapter);
4811 if (status)
4812 return status;
4813
4814 /* Allow interrupts for other ULPs running on NIC function */
4815 be_intr_set(adapter, true);
4816
4817 return 0;
4818}
4819
7707133c
SP
4820static int be_setup(struct be_adapter *adapter)
4821{
4822 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4823 int status;
4824
f962f840
SP
4825 status = be_func_init(adapter);
4826 if (status)
4827 return status;
4828
7707133c
SP
4829 be_setup_init(adapter);
4830
4831 if (!lancer_chip(adapter))
4832 be_cmd_req_native_mode(adapter);
4833
980df249
SR
4834 /* invoke this cmd first to get pf_num and vf_num which are needed
4835 * for issuing profile related cmds
4836 */
4837 if (!BEx_chip(adapter)) {
4838 status = be_cmd_get_func_config(adapter, NULL);
4839 if (status)
4840 return status;
4841 }
72ef3a88 4842
de2b1e03
SK
4843 status = be_get_config(adapter);
4844 if (status)
4845 goto err;
4846
ace40aff
VV
4847 if (!BE2_chip(adapter) && be_physfn(adapter))
4848 be_alloc_sriov_res(adapter);
4849
de2b1e03 4850 status = be_get_resources(adapter);
10ef9ab4 4851 if (status)
a54769f5 4852 goto err;
6b7c5b94 4853
7707133c 4854 status = be_msix_enable(adapter);
10ef9ab4 4855 if (status)
a54769f5 4856 goto err;
6b7c5b94 4857
bcc84140 4858 /* will enable all the needed filter flags in be_open() */
62219066 4859 status = be_if_create(adapter);
7707133c 4860 if (status)
a54769f5 4861 goto err;
6b7c5b94 4862
68d7bdcb
SP
4863 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4864 rtnl_lock();
7707133c 4865 status = be_setup_queues(adapter);
68d7bdcb 4866 rtnl_unlock();
95046b92 4867 if (status)
1578e777
PR
4868 goto err;
4869
7707133c 4870 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4871
4872 status = be_mac_setup(adapter);
10ef9ab4
SP
4873 if (status)
4874 goto err;
4875
e97e3cda 4876 be_cmd_get_fw_ver(adapter);
acbafeb1 4877 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4878
e9e2a904 4879 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4880 dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
e9e2a904
SK
4881 adapter->fw_ver);
4882 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4883 }
4884
00d594c3
KA
4885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4886 adapter->rx_fc);
4887 if (status)
4888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4889 &adapter->rx_fc);
590c391d 4890
00d594c3
KA
4891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4892 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4893
bdce2ad7
SR
4894 if (be_physfn(adapter))
4895 be_cmd_set_logical_link_config(adapter,
4896 IFLA_VF_LINK_STATE_AUTO, 0);
4897
884476be
SK
4898 /* The BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
4899 * confusing any Linux bridge or OVS it might be connected to.
4900 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4901 * when SRIOV is not enabled.
4902 */
4903 if (BE3_chip(adapter))
4904 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4905 PORT_FWD_TYPE_PASSTHRU, 0);
4906
bec84e6b
VV
4907 if (adapter->num_vfs)
4908 be_vf_setup(adapter);
f9449ab7 4909
f25b119c
PR
4910 status = be_cmd_get_phy_info(adapter);
4911 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4912 adapter->phy.fc_autoneg = 1;
4913
710f3e59
SB
4914 if (be_physfn(adapter) && !lancer_chip(adapter))
4915 be_cmd_set_features(adapter);
4916
68d7bdcb 4917 be_schedule_worker(adapter);
e1ad8e33 4918 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4919 return 0;
a54769f5
SP
4920err:
4921 be_clear(adapter);
4922 return status;
4923}
6b7c5b94 4924
66268739
IV
4925#ifdef CONFIG_NET_POLL_CONTROLLER
4926static void be_netpoll(struct net_device *netdev)
4927{
4928 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4929 struct be_eq_obj *eqo;
66268739
IV
4930 int i;
4931
e49cc34f 4932 for_all_evt_queues(adapter, eqo, i) {
20947770 4933 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4934 napi_schedule(&eqo->napi);
4935 }
66268739
IV
4936}
4937#endif
4938
485bf569
SN
4939int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4940{
4941 const struct firmware *fw;
4942 int status;
4943
4944 if (!netif_running(adapter->netdev)) {
4945 dev_err(&adapter->pdev->dev,
4946 "Firmware load not allowed (interface is down)\n");
940a3fcd 4947 return -ENETDOWN;
485bf569
SN
4948 }
4949
4950 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4951 if (status)
4952 goto fw_exit;
4953
4954 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4955
4956 if (lancer_chip(adapter))
4957 status = lancer_fw_download(adapter, fw);
4958 else
4959 status = be_fw_download(adapter, fw);
4960
eeb65ced 4961 if (!status)
e97e3cda 4962 be_cmd_get_fw_ver(adapter);
eeb65ced 4963
84517482
AK
4964fw_exit:
4965 release_firmware(fw);
4966 return status;
4967}
4968
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags, struct netlink_ext_ack *extack)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

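/* ndo_bridge_getlink handler: report the current e-switch mode. BEx and
 * Lancer support VEB only; on other chips the mode is queried from FW and
 * PASSTHRU (EVB disabled) is not reported.
 */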
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

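/* Allocate a be_cmd_work item bound to the given handler. GFP_ATOMIC is
 * used so this can be called from contexts that must not sleep.
 */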
static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}

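/* ndo_features_check handler: mask out offload bits this skb cannot use,
 * working around chip-specific TSO limitations and restricting tunnel
 * offloads to valid VxLAN packets (the only tunnel type offloaded here).
 */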
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle a packet with an MSS less than 256,
		 * nor a TSO packet with a single segment.
		 * Disable GSO support in such cases.
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

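/* Compose a globally unique port id: one byte carrying the HBA port number
 * followed by the controller's serial number, copied word by word in
 * reverse order.
 */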
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

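/* ndo_set_rx_mode is called in atomic context, so defer the FW filter
 * update to process context via the shared be_wq workqueue.
 */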
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
	.ndo_tx_timeout		= be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
		netdev->udp_tunnel_nic_info = &be_udp_tunnels;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}

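/* Quiesce the adapter: detach the netdev, close it if it was running and
 * tear down resources. Inverse of be_resume(); used by the suspend, EEH
 * and error-recovery paths.
 */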
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

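/* Check the recovery criteria for a non-Lancer UE: the POST stage must
 * report a recoverable error code, sufficient time must have elapsed since
 * driver load and since the last recovery, and the error must not repeat
 * the previous code.
 */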
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

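/* TPE error-recovery state machine for BEx/Skyhawk. Each invocation
 * advances one state (detect, then a PF0-only soft reset, then pre-poll)
 * and sets resched_delay so be_err_detection_task() re-runs it after the
 * required settle time. Returns 0 once re-init may proceed, -EAGAIN while
 * recovery is still in progress, or a negative error on failure.
 */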
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates a Chip Soft Reset. But PF0 must wait
		 * UE2SR milliseconds before it checks the final error status
		 * in SLIPORT_SEMAPHORE to determine whether the recovery
		 * criteria are met. If they are, PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

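/* Full recovery sequence: on non-Lancer chips run the TPE state machine
 * first, then wait for FW readiness and rebuild the adapter with
 * be_cleanup()/be_resume().
 */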
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for the adapter to come out of error. Retry error recovery
		 * after this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

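/* Periodic (1 second) housekeeping: sample die temperature on the PF,
 * reap MCC completions while the interface is down, refresh stats,
 * replenish starved RX queues and update EQ delays.
 */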
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

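/* Doorbell BAR: Lancer and VFs expose doorbells in BAR 0; other PFs use
 * BAR 4.
 */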
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
					   &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (status) {
		dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
		goto free_netdev;
	}

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int __maybe_unused be_suspend(struct device *dev_d)
{
	struct be_adapter *adapter = dev_get_drvdata(dev_d);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	return 0;
}

static int __maybe_unused be_pci_resume(struct device *dev_d)
{
	struct be_adapter *adapter = dev_get_drvdata(dev_d);
	int status = 0;

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover; wait for the dump to finish.
	 * Wait only on the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are distributed equally across the maximum number of
	 * VFs. The user may request that only a subset of the maximum VFs
	 * be enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF gets access to more resources.
	 * This facility is not available in BE3 FW; in Lancer chips it is
	 * done by the FW.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.driver.pm = &be_pci_pm_ops,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);