be2net: remove unused old custom busy-poll fields
[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
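/* e.g. via the standard sysfs SR-IOV interface (PCI address illustrative):
 *	echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 */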

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

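/* Ring the event-queue doorbell: acknowledges 'num_popped' events and, when
 * 'arm' is set, re-arms the EQ so that further events raise an interrupt.
 * eq_delay_mult_enc carries the encoded interrupt-delay multiplier.
 */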
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

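/* Ring the completion-queue doorbell: returns 'num_popped' processed CQ
 * entries to the HW and optionally re-arms the CQ.
 */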
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

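/* Accumulate a 16-bit HW counter into a 32-bit SW accumulator: the low 16
 * bits mirror the HW counter, the high 16 bits count wrap-arounds. Worked
 * example: if *acc == 0x0001fffe and the HW counter now reads 5, the counter
 * has wrapped, so the new accumulated value is 0x00020005.
 */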
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	WRITE_ONCE(*acc, newacc);
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

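/* tx_bytes counts bytes put on the wire, so for a TSO skb the protocol
 * headers duplicated into each emitted segment are counted on top of
 * skb->len; e.g. gso_segs == 4 with a 54-byte header adds 3 * 54 bytes.
 */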
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

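/* Worked example for skb_wrb_cnt() below: an skb with linear data and three
 * page frags needs 1 (hdr) + 1 (linear) + 3 (frags) = 5 WRBs.
 */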
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	u32 head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

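/* Insert the VLAN tag(s) into the packet data itself (SW tagging): the inner
 * tag comes from the skb or the pvid; in QnQ mode an outer tag carrying
 * qnq_vid is inserted as well and HW tagging is skipped.
 */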
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

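/* Ring the TX doorbell for all WRBs pending on the queue. On BE chips the
 * WRB count rung in apparently must be even: an all-zero dummy WRB is
 * composed to pad an odd pending count (Lancer needs no such padding).
 */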
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1239
760c295e
VD
1240/* OS2BMC related */
1241
1242#define DHCP_CLIENT_PORT 68
1243#define DHCP_SERVER_PORT 67
1244#define NET_BIOS_PORT1 137
1245#define NET_BIOS_PORT2 138
1246#define DHCPV6_RAS_PORT 547
1247
1248#define is_mc_allowed_on_bmc(adapter, eh) \
1249 (!is_multicast_filt_enabled(adapter) && \
1250 is_multicast_ether_addr(eh->h_dest) && \
1251 !is_broadcast_ether_addr(eh->h_dest))
1252
1253#define is_bc_allowed_on_bmc(adapter, eh) \
1254 (!is_broadcast_filt_enabled(adapter) && \
1255 is_broadcast_ether_addr(eh->h_dest))
1256
1257#define is_arp_allowed_on_bmc(adapter, skb) \
1258 (is_arp(skb) && is_arp_filt_enabled(adapter))
1259
1260#define is_broadcast_packet(eh, adapter) \
1261 (is_multicast_ether_addr(eh->h_dest) && \
1262 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1263
1264#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1265
1266#define is_arp_filt_enabled(adapter) \
1267 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1268
1269#define is_dhcp_client_filt_enabled(adapter) \
1270 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1271
1272#define is_dhcp_srvr_filt_enabled(adapter) \
1273 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1274
1275#define is_nbios_filt_enabled(adapter) \
1276 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1277
1278#define is_ipv6_na_filt_enabled(adapter) \
1279 (adapter->bmc_filt_mask & \
1280 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1281
1282#define is_ipv6_ra_filt_enabled(adapter) \
1283 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1284
1285#define is_ipv6_ras_filt_enabled(adapter) \
1286 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1287
1288#define is_broadcast_filt_enabled(adapter) \
1289 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1290
1291#define is_multicast_filt_enabled(adapter) \
1292 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1293
1294static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1295 struct sk_buff **skb)
1296{
1297 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1298 bool os2bmc = false;
1299
1300 if (!be_is_os2bmc_enabled(adapter))
1301 goto done;
1302
1303 if (!is_multicast_ether_addr(eh->h_dest))
1304 goto done;
1305
1306 if (is_mc_allowed_on_bmc(adapter, eh) ||
1307 is_bc_allowed_on_bmc(adapter, eh) ||
1308 is_arp_allowed_on_bmc(adapter, (*skb))) {
1309 os2bmc = true;
1310 goto done;
1311 }
1312
1313 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1314 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1315 u8 nexthdr = hdr->nexthdr;
1316
1317 if (nexthdr == IPPROTO_ICMPV6) {
1318 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1319
1320 switch (icmp6->icmp6_type) {
1321 case NDISC_ROUTER_ADVERTISEMENT:
1322 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1323 goto done;
1324 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1325 os2bmc = is_ipv6_na_filt_enabled(adapter);
1326 goto done;
1327 default:
1328 break;
1329 }
1330 }
1331 }
1332
1333 if (is_udp_pkt((*skb))) {
1334 struct udphdr *udp = udp_hdr((*skb));
1335
1645d997 1336 switch (ntohs(udp->dest)) {
760c295e
VD
1337 case DHCP_CLIENT_PORT:
1338 os2bmc = is_dhcp_client_filt_enabled(adapter);
1339 goto done;
1340 case DHCP_SERVER_PORT:
1341 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1342 goto done;
1343 case NET_BIOS_PORT1:
1344 case NET_BIOS_PORT2:
1345 os2bmc = is_nbios_filt_enabled(adapter);
1346 goto done;
1347 case DHCPV6_RAS_PORT:
1348 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1349 goto done;
1350 default:
1351 break;
1352 }
1353 }
1354done:
1355 /* For packets over a vlan, which are destined
1356 * to BMC, asic expects the vlan to be inline in the packet.
1357 */
1358 if (os2bmc)
1359 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1360
1361 return os2bmc;
1362}
1363
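/* Main transmit entry point: applies chip-specific workarounds, DMA-maps the
 * skb into WRBs, optionally enqueues a second copy for the BMC (OS2BMC) with
 * the mgmt bit set, and rings the TX doorbell unless xmit_more is pending.
 */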
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

f66b7cfd
SP
1537static void be_set_all_promisc(struct be_adapter *adapter)
1538{
1539 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1540 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1541}
1542
1543static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1544{
0fc16ebf 1545 int status;
6b7c5b94 1546
f66b7cfd
SP
1547 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1548 return;
6b7c5b94 1549
f66b7cfd
SP
1550 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1551 if (!status)
1552 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1553}
1554
92fbb1df 1555static void be_set_uc_promisc(struct be_adapter *adapter)
f66b7cfd
SP
1556{
1557 int status;
1558
92fbb1df
SB
1559 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1560 return;
1561
1562 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
f66b7cfd 1563 if (!status)
92fbb1df
SB
1564 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1565}
1566
1567static void be_clear_uc_promisc(struct be_adapter *adapter)
1568{
1569 int status;
1570
1571 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1572 return;
1573
1574 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1575 if (!status)
1576 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1577}
1578
1579/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1580 * We use a single callback function for both sync and unsync. We really don't
1581 * add/remove addresses through this callback. But, we use it to detect changes
1582 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1583 */
1584static int be_uc_list_update(struct net_device *netdev,
1585 const unsigned char *addr)
1586{
1587 struct be_adapter *adapter = netdev_priv(netdev);
1588
1589 adapter->update_uc_list = true;
1590 return 0;
1591}
1592
1593static int be_mc_list_update(struct net_device *netdev,
1594 const unsigned char *addr)
1595{
1596 struct be_adapter *adapter = netdev_priv(netdev);
1597
1598 adapter->update_mc_list = true;
1599 return 0;
1600}
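
/* A sketch of how the core drives the callbacks above (assuming the standard
 * __dev_mc_sync()/__dev_uc_sync() contract): the sync callback fires for each
 * address newly added to the netdev list and the unsync callback for each
 * address removed. Since both point at the same update function, any change
 * simply latches adapter->update_uc_list/update_mc_list; the full list is
 * then re-programmed in one shot by be_set_uc_list()/be_set_mc_list().
 */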

static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}

static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}

static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
		return 0;
	}

	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
			       adapter->if_handle,
			       &adapter->pmac_id[uc_idx + 1], 0);
}

static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
	if (pmac_id == adapter->pmac_id[0])
		return;

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
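
/* Note on the pmac_id table layout as used above: slot 0 holds the
 * function's primary MAC and UC address i is tracked in slot i + 1. When a
 * UC entry duplicates the primary MAC, its slot just aliases pmac_id[0], so
 * be_uc_mac_del() can recognize it and skip deleting the primary MAC filter.
 */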

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	__dev_uc_unsync(netdev, NULL);
	for (i = 0; i < adapter->uc_macs; i++)
		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

	adapter->uc_macs = 0;
}

static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}

static void be_work_set_rx_mode(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
		container_of(work, struct be_cmd_work, work);

	__be_set_rx_mode(cmd_work->adapter);
	kfree(cmd_work);
}
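
/* Rx-mode updates are deferred to a workqueue because the rx-filter mailbox
 * commands sleep, while ndo_set_rx_mode() runs with netif_addr_lock held.
 * An illustrative sketch of the deferral pattern (helper names and flags
 * here are assumptions; the actual allocation helper lives elsewhere in
 * this file):
 *
 *	cmd_work = kzalloc(sizeof(*cmd_work), GFP_ATOMIC);
 *	cmd_work->adapter = adapter;
 *	INIT_WORK(&cmd_work->work, be_work_set_rx_mode);
 *	queue_work(wq, &cmd_work->work);
 */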

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
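
/* Worked example of the TCI packing above: with VLAN_PRIO_SHIFT == 13,
 * vlan = 100 (0x064) and qos = 5 yields 0x064 | (5 << 13) = 0xa064, i.e. the
 * standard 802.1Q tag layout of 3 priority bits above the 12-bit VID.
 */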

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
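
/* Worked example for the eqd heuristic above: at a combined rate of 600k
 * pkts/s, eqd = (600000 / 15000) << 2 = 160 us of interrupt delay, which is
 * then clamped to this EQ's [min_eqd, max_eqd] window. Rates below ~30k
 * pkts/s (eqd < 8) disable moderation entirely in favour of latency.
 */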

/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
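
/* Threshold summary for the encoding above (the R2I_DLY_ENC_* values are
 * defined elsewhere in the driver headers):
 *	eqd (us):   0..20    21..60    61..100    >100
 *	mult_enc:   ENC_0    ENC_3     ENC_2      ENC_1
 * i.e. each band of computed delay selects a different rearm-to-interrupt
 * delay multiplier encoding in the EQ doorbell.
 */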

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
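
/* be_eqd_update() batches one entry per EQ whose delay actually changed, so
 * a single be_cmd_modify_eqd() call covers all EQs. The 65/100 factor scales
 * the microsecond eqd into the multiplier units the firmware command takes,
 * e.g. eqd = 160 -> delay_multiplier = 104.
 */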

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non-TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2450{
2451 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2452 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2453 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2454
2e588f84
SP
2455 /* For checking the valid bit it is Ok to use either definition as the
2456 * valid bit is at the same position in both v0 and v1 Rx compl */
2457 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2458 return NULL;
6b7c5b94 2459
2e588f84
SP
2460 rmb();
2461 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2462
2e588f84 2463 if (adapter->be3_native)
10ef9ab4 2464 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2465 else
10ef9ab4 2466 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2467
e38b1706
SK
2468 if (rxcp->ip_frag)
2469 rxcp->l4_csum = 0;
2470
15d72184 2471 if (rxcp->vlanf) {
f93f160b
VV
2472 /* In QNQ modes, if qnq bit is not set, then the packet was
2473 * tagged only with the transparent outer vlan-tag and must
2474 * not be treated as a vlan packet by host
2475 */
2476 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2477 rxcp->vlanf = 0;
6b7c5b94 2478
15d72184 2479 if (!lancer_chip(adapter))
3c709f8f 2480 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2481
939cf306 2482 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2483 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2484 rxcp->vlanf = 0;
2485 }
2e588f84
SP
2486
2487 /* As the compl has been parsed, reset it; we wont touch it again */
2488 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2489
3abcdeda 2490 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2491 return rxcp;
2492}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
SR
2586static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2587{
2588 switch (status) {
2589 case BE_TX_COMP_HDR_PARSE_ERR:
2590 tx_stats(txo)->tx_hdr_parse_err++;
2591 break;
2592 case BE_TX_COMP_NDMA_ERR:
2593 tx_stats(txo)->tx_dma_err++;
2594 break;
2595 case BE_TX_COMP_ACL_ERR:
2596 tx_stats(txo)->tx_spoof_check_err++;
2597 break;
2598 }
2599}
2600
2601static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2602{
2603 switch (status) {
2604 case LANCER_TX_COMP_LSO_ERR:
2605 tx_stats(txo)->tx_tso_err++;
2606 break;
2607 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2608 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2609 tx_stats(txo)->tx_spoof_check_err++;
2610 break;
2611 case LANCER_TX_COMP_QINQ_ERR:
2612 tx_stats(txo)->tx_qinq_err++;
2613 break;
2614 case LANCER_TX_COMP_PARITY_ERR:
2615 tx_stats(txo)->tx_internal_parity_err++;
2616 break;
2617 case LANCER_TX_COMP_DMA_ERR:
2618 tx_stats(txo)->tx_dma_err++;
2619 break;
2620 case LANCER_TX_COMP_SGE_ERR:
2621 tx_stats(txo)->tx_sge_err++;
2622 break;
2623 }
2624}
2625
2626static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2627 struct be_tx_obj *txo)
6b7c5b94 2628{
152ffe5b
SB
2629 struct be_queue_info *tx_cq = &txo->cq;
2630 struct be_tx_compl_info *txcp = &txo->txcp;
2631 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2632
152ffe5b 2633 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2634 return NULL;
2635
152ffe5b 2636 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2637 rmb();
152ffe5b 2638 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2639
152ffe5b
SB
2640 txcp->status = GET_TX_COMPL_BITS(status, compl);
2641 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2642
ffc39620
SR
2643 if (txcp->status) {
2644 if (lancer_chip(adapter)) {
2645 lancer_update_tx_err(txo, txcp->status);
2646 /* Reset the adapter incase of TSO,
2647 * SGE or Parity error
2648 */
2649 if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2650 txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2651 txcp->status == LANCER_TX_COMP_SGE_ERR)
2652 be_set_error(adapter, BE_ERROR_TX);
2653 } else {
2654 be_update_tx_err(txo, txcp->status);
2655 }
2656 }
2657
2658 if (be_check_error(adapter, BE_ERROR_TX))
2659 return NULL;
2660
152ffe5b 2661 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2662 queue_tail_inc(tx_cq);
2663 return txcp;
2664}
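
/* The valid-bit handshake used by be_tx_compl_get() (and be_rx_compl_get()
 * above) follows the usual producer/consumer convention: hardware writes the
 * valid bit last, the driver reads it, issues rmb() so the remaining dwords
 * are not loaded ahead of it, and clears the bit after parsing so the slot
 * reads as empty on the next trip around the ring.
 */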

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
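
/* events_get() drains and zeroes every posted EQ entry but deliberately does
 * not rearm the EQ; callers pass the count to be_eq_notify() so that a single
 * doorbell write both acknowledges the events and decides whether to rearm.
 */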

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter, BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(adapter, txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
SP
2957static void be_tx_queues_destroy(struct be_adapter *adapter)
2958{
2959 struct be_queue_info *q;
3c8def97
SP
2960 struct be_tx_obj *txo;
2961 u8 i;
6b7c5b94 2962
3c8def97
SP
2963 for_all_tx_queues(adapter, txo, i) {
2964 q = &txo->q;
2965 if (q->created)
2966 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2967 be_queue_free(adapter, q);
6b7c5b94 2968
3c8def97
SP
2969 q = &txo->cq;
2970 if (q->created)
2971 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2972 be_queue_free(adapter, q);
2973 }
6b7c5b94
SP
2974}
2975
7707133c 2976static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2977{
73f394e6 2978 struct be_queue_info *cq;
3c8def97 2979 struct be_tx_obj *txo;
73f394e6 2980 struct be_eq_obj *eqo;
92bf14ab 2981 int status, i;
6b7c5b94 2982
e261768e 2983 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
dafc0fe3 2984
10ef9ab4
SP
2985 for_all_tx_queues(adapter, txo, i) {
2986 cq = &txo->cq;
2987 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2988 sizeof(struct be_eth_tx_compl));
2989 if (status)
2990 return status;
3c8def97 2991
827da44c
JS
2992 u64_stats_init(&txo->stats.sync);
2993 u64_stats_init(&txo->stats.sync_compl);
2994
10ef9ab4
SP
2995 /* If num_evt_qs is less than num_tx_qs, then more than
2996 * one txq share an eq
2997 */
73f394e6
SP
2998 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2999 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
3000 if (status)
3001 return status;
6b7c5b94 3002
10ef9ab4
SP
3003 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3004 sizeof(struct be_eth_wrb));
3005 if (status)
3006 return status;
6b7c5b94 3007
94d73aaa 3008 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
3009 if (status)
3010 return status;
73f394e6
SP
3011
3012 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3013 eqo->idx);
3c8def97 3014 }
6b7c5b94 3015
d379142b
SP
3016 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3017 adapter->num_tx_qs);
10ef9ab4 3018 return 0;
6b7c5b94
SP
3019}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
6b7c5b94 3110{
10ef9ab4 3111 struct be_eq_obj *eqo = dev;
6b7c5b94 3112
20947770 3113 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 3114 napi_schedule(&eqo->napi);
6b7c5b94
SP
3115 return IRQ_HANDLED;
3116}
3117
2e588f84 3118static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 3119{
e38b1706 3120 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
3121}
3122
10ef9ab4 3123static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
fb6113e6 3124 int budget)
6b7c5b94 3125{
3abcdeda
SP
3126 struct be_adapter *adapter = rxo->adapter;
3127 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 3128 struct be_rx_compl_info *rxcp;
6b7c5b94 3129 u32 work_done;
c30d7266 3130 u32 frags_consumed = 0;
6b7c5b94
SP
3131
3132 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 3133 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
3134 if (!rxcp)
3135 break;
3136
12004ae9
SP
3137 /* Is it a flush compl that has no data */
3138 if (unlikely(rxcp->num_rcvd == 0))
3139 goto loop_continue;
3140
3141 /* Discard compl with partial DMA Lancer B0 */
3142 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 3143 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
3144 goto loop_continue;
3145 }
3146
3147 /* On BE drop pkts that arrive due to imperfect filtering in
3148 * promiscuous mode on some skews
3149 */
3150 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 3151 !lancer_chip(adapter))) {
10ef9ab4 3152 be_rx_compl_discard(rxo, rxcp);
12004ae9 3153 goto loop_continue;
64642811 3154 }
009dd872 3155
fb6113e6 3156 if (do_gro(rxcp))
10ef9ab4 3157 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 3158 else
6384a4d0
SP
3159 be_rx_compl_process(rxo, napi, rxcp);
3160
12004ae9 3161loop_continue:
c30d7266 3162 frags_consumed += rxcp->num_rcvd;
2e588f84 3163 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
3164 }
3165
10ef9ab4
SP
3166 if (work_done) {
3167 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 3168
6384a4d0
SP
3169 /* When an rx-obj gets into post_starved state, just
3170 * let be_worker do the posting.
3171 */
3172 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3173 !rxo->rx_post_starved)
c30d7266
AK
3174 be_post_rx_frags(rxo, GFP_ATOMIC,
3175 max_t(u32, MAX_RX_POST,
3176 frags_consumed));
6b7c5b94 3177 }
10ef9ab4 3178
6b7c5b94
SP
3179 return work_done;
3180}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(adapter, txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete_done(napi, max_work);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
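
/* NAPI contract recap for be_poll(): returning less than budget and calling
 * napi_complete_done() ends the poll and rearms the EQ (be_eq_notify() with
 * rearm set); returning the full budget leaves the EQ unarmed so the core
 * polls again without waiting for a fresh interrupt.
 */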

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	struct device *dev = &adapter->pdev->dev;
	u16 val;
	u32 i;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		if (ue_lo || ue_hi) {
			/* On certain platforms BE3 hardware can indicate
			 * spurious UEs. In case of a UE in the chip,
			 * the POST register correctly reports either a
			 * FAT_LOG_START state (FW is currently dumping
			 * FAT log data) or an ARMFW_UE state. Check for the
			 * above states to ascertain if the UE is valid or not.
			 */
			if (BE3_chip(adapter)) {
				val = be_POST_stage_get(adapter);
				if ((val & POST_STAGE_FAT_LOG_START)
				     != POST_STAGE_FAT_LOG_START &&
				    (val & POST_STAGE_ARMFW_UE)
				     != POST_STAGE_ARMFW_UE &&
				    (val & POST_STAGE_RECOVERABLE_ERR)
				     != POST_STAGE_RECOVERABLE_ERR)
					return;
			}

			dev_err(dev, "Error detected in the adapter");
			be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

c2bba3df 3344static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3345{
6fde0e63 3346 unsigned int i, max_roce_eqs;
d379142b 3347 struct device *dev = &adapter->pdev->dev;
6fde0e63 3348 int num_vec;
6b7c5b94 3349
ce7faf0a
SP
3350 /* If RoCE is supported, program the max number of vectors that
3351 * could be used for NIC and RoCE, else, just program the number
3352 * we'll use initially.
92bf14ab 3353 */
e261768e
SP
3354 if (be_roce_supported(adapter)) {
3355 max_roce_eqs =
3356 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3357 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3358 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3359 } else {
3360 num_vec = max(adapter->cfg_num_rx_irqs,
3361 adapter->cfg_num_tx_irqs);
3362 }
3abcdeda 3363
ac6a0c4a 3364 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3365 adapter->msix_entries[i].entry = i;
3366
7dc4c064
AG
3367 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3368 MIN_MSIX_VECTORS, num_vec);
3369 if (num_vec < 0)
3370 goto fail;
92bf14ab 3371
92bf14ab
SP
3372 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3373 adapter->num_msix_roce_vec = num_vec / 2;
3374 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3375 adapter->num_msix_roce_vec);
3376 }
3377
3378 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3379
3380 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3381 adapter->num_msix_vec);
c2bba3df 3382 return 0;
7dc4c064
AG
3383
3384fail:
3385 dev_warn(dev, "MSIx enable failed\n");
3386
3387 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3388 if (be_virtfn(adapter))
7dc4c064
AG
3389 return num_vec;
3390 return 0;
6b7c5b94
SP
3391}
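
/* Editorial note (not in the original source): pci_enable_msix_range()
 * returns the number of vectors actually allocated when it can satisfy at
 * least the minimum (MIN_MSIX_VECTORS here), or a negative errno otherwise.
 * E.g. requesting the range [2, 10] on a host that can grant only 6 returns
 * 6; the RoCE/NIC split above then divides whatever was granted.
 */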

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}

static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
		eth_zero_addr(adapter->dev_mac);
	}

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
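
/* Editorial example (not in the original source): with num_rss_qs = 4 and
 * RSS_INDIR_TABLE_LEN = 128, the nested loops above fill rsstable[] with the
 * repeating pattern q0, q1, q2, q3, q0, q1, ... so that the 128 indirection
 * slots are spread evenly across the 4 RSS queues.
 */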

static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Normally this condition is true as the ->dev_mac is zeroed.
	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
	 * subsequent be_dev_mac_add() can fail (after fresh boot)
	 */
	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
		int old_pmac_id = -1;

		/* Remember old programmed MAC if any - can happen on BE3 VF */
		if (!is_zero_ether_addr(adapter->dev_mac))
			old_pmac_id = adapter->pmac_id[0];

		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;

		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
		 */
		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
			be_dev_mac_del(adapter, old_pmac_id);

		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
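
/* Editorial example (not in the original source): assuming a hypothetical
 * PF MAC of 00:00:c9:12:34:56 and jhash() returning 0x00abcdef, the code
 * above yields a seed MAC of 00:00:c9:ab:cd:ef - the PF's 3-byte OUI is
 * kept and the NIC-specific half is replaced by the low 24 bits of the
 * hash.
 */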

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac,
						vf_cfg->if_handle, vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

68d7bdcb 3867static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3868{
191eb756
SP
3869 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3870 cancel_delayed_work_sync(&adapter->work);
3871 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3872 }
68d7bdcb
SP
3873}
3874
eb7dd46c
SP
3875static void be_cancel_err_detection(struct be_adapter *adapter)
3876{
710f3e59
SB
3877 struct be_error_recovery *err_rec = &adapter->error_recovery;
3878
3879 if (!be_err_recovery_workq)
3880 return;
3881
eb7dd46c 3882 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
710f3e59 3883 cancel_delayed_work_sync(&err_rec->err_detection_work);
eb7dd46c
SP
3884 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3885 }
3886}

static int be_enable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct be_vxlan_port *vxlan_port;
	__be16 port;
	int status;

	vxlan_port = list_first_entry(&adapter->vxlan_port_list,
				      struct be_vxlan_port, list);
	port = vxlan_port->port;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		return status;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		return status;
	}
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return 0;
}

static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}

static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and its VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than its PF pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
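
/* Editorial example (not in the original source): with hypothetical pool
 * limits res.max_rss_qs = 32, SH_VF_MAX_NIC_EQS = 16 and num_vfs = 7, each
 * VF is offered min(16, 32 / (7 + 1)) = 4 RSS queues; the remainder stays
 * with the PF. The per-(num_vfs + 1) divisions above split TXQs, CQs, MACs,
 * VLANs, IFACEs and MCCQs the same way.
 */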

static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}

static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}
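
/* Editorial note (not in the original source): the mapping above as a truth
 * table (checked in order, first match wins):
 *
 *   VNIC_MODE + QNQ_MODE -> vNIC1
 *   QNQ_MODE only        -> FLEX10
 *   VNIC_MODE only       -> vNIC2
 *   UMC_ENABLED          -> UMC
 *   none of the above    -> MC_NONE
 */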

/* On BE2/BE3, FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * number of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
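
/* Editorial example (not in the original source): assuming hypothetical
 * values MAX_PORT_RSS_TABLES = 15, port_res.nic_pfs = 1, this PF's
 * max_vfs = 32 and port_res.max_vfs = 64 on the port, then
 * rss_tables_on_port = 14 and this PF pool's share works out to
 * 32 * 14 / 64 = 7 RSS tables.
 */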

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}

static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
		min_t(u16, netif_get_num_default_rss_queues(),
		      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}

static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

		/* Initial MAC for BE3 VFs is already programmed by PF */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			memcpy(adapter->dev_mac, mac, ETH_ALEN);
	}

	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}

static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	if (status)
		return status;

	return 0;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this, promisc mode is not restored during
	 * be_open() because the driver thinks that it is
	 * already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
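
/* Editorial example (not in the original source): sscanf() returns the
 * number of fields converted. For a version string like "11.4.204.0" it
 * converts one integer and fw_major_num() returns 11; for a malformed
 * string it converts nothing and the function falls back to 0.
 */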

/* If it is error recovery, FLR the PF.
 * Else, if any VFs are already enabled, don't FLR the PF.
 */
static bool be_reset_required(struct be_adapter *adapter)
{
	if (be_error_recovering(adapter))
		return true;
	else
		return pci_num_vf(adapter->pdev) == 0;
}

/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}
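
/* Editorial note (not in the original source): callers use this helper to
 * defer a command to the be_wq workqueue, as be_cfg_vxlan_port() below does:
 *
 *   cmd_work = be_alloc_work(adapter, be_work_add_vxlan_port);
 *   if (cmd_work) {
 *           cmd_work->info.vxlan_port = ti->port;
 *           queue_work(be_wq, &cmd_work->work);
 *   }
 *
 * GFP_ATOMIC is used because callers may run in non-sleeping contexts.
 */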

/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and re-enable them again when
 * there's only one port left. We maintain a list of ports for this purpose.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;
	int status;

	/* Bump up the alias count if it is an existing port */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			vxlan_port->port_aliases++;
			goto done;
		}
	}

	/* Add a new port to our list. We don't need a lock here since port
	 * add/delete are done only in the context of a single-threaded work
	 * queue (be_wq).
	 */
	vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
	if (!vxlan_port)
		goto done;

	vxlan_port->port = port;
	INIT_LIST_HEAD(&vxlan_port->list);
	list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
	adapter->vxlan_port_count++;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		goto err;
	}

	if (adapter->vxlan_port_count > 1)
		goto done;

	status = be_enable_vxlan_offloads(adapter);
	if (!status)
		goto done;

err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
	return;
}

static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;

	/* Nothing to be done if a port alias is being deleted */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			if (vxlan_port->port_aliases) {
				vxlan_port->port_aliases--;
				goto done;
			}
			break;
		}
	}

	/* No port aliases left; delete the port from the list */
	list_del(&vxlan_port->list);
	adapter->vxlan_port_count--;
c9c47142 5057
bf8d9dfb
SB
5058 /* Disable VxLAN offload if this is the offloaded port */
5059 if (adapter->vxlan_port == vxlan_port->port) {
5060 WARN_ON(adapter->vxlan_port_count);
5061 be_disable_vxlan_offloads(adapter);
5062 dev_info(&adapter->pdev->dev,
5063 "Disabled VxLAN offloads for UDP port %d\n",
5064 be16_to_cpu(port));
b7172414 5065 goto out;
1e5b311a
JB
5066 }
5067
bf8d9dfb
SB
5068 /* If only 1 port is left, re-enable VxLAN offload */
5069 if (adapter->vxlan_port_count == 1)
5070 be_enable_vxlan_offloads(adapter);
c9c47142 5071
b7172414 5072out:
bf8d9dfb
SB
5073 kfree(vxlan_port);
5074done:
b7172414
SP
5075 kfree(cmd_work);
5076}
5077
5078static void be_cfg_vxlan_port(struct net_device *netdev,
5079 struct udp_tunnel_info *ti,
5080 void (*func)(struct work_struct *))
5081{
5082 struct be_adapter *adapter = netdev_priv(netdev);
5083 struct be_cmd_work *cmd_work;
5084
5085 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5086 return;
5087
5088 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5089 return;
5090
5091 cmd_work = be_alloc_work(adapter, func);
5092 if (cmd_work) {
5093 cmd_work->info.vxlan_port = ti->port;
5094 queue_work(be_wq, &cmd_work->work);
5095 }
5096}
5097
5098static void be_del_vxlan_port(struct net_device *netdev,
5099 struct udp_tunnel_info *ti)
5100{
5101 be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
5102}
5103
5104static void be_add_vxlan_port(struct net_device *netdev,
5105 struct udp_tunnel_info *ti)
5106{
5107 be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
c9c47142 5108}
725d548f 5109
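/* The ndo_udp_tunnel_add/del entry points above are driven by the stack;
 * for example, creating and bringing up a VxLAN device such as
 *	ip link add vxlan0 type vxlan id 10 dstport 4789
 * causes ndo_udp_tunnel_add() to be invoked with the configured dport.
 */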
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle packets with an MSS less than 256.
		 * It also can't handle a TSO packet with a single segment.
		 * Disable GSO support in such cases.
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * to allow other tunneled traffic, like GRE, to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
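/* Layout validated by the checks above before tunnel offloads are retained:
 *
 *	[outer Ether][outer IP/IPv6][UDP, dest == adapter->vxlan_port]
 *	[VxLAN hdr][inner Ether (ETH_P_TEB)] ...
 *
 * i.e. the inner MAC header must begin exactly sizeof(struct udphdr) +
 * sizeof(struct vxlanhdr) bytes past the transport header.
 */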
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
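/* Flags placed in hw_features above remain user-togglable at runtime,
 * e.g. "ethtool -K <iface> rxhash off", while flags added only to
 * netdev->features (such as VLAN RX/filter here) are reported as fixed.
 */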
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;			/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
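/* Summary of the recovery state machine above:
 *
 *	NONE ----> DETECT	all PFs; recheck after the UE-detect duration
 *	DETECT --> RESET	PF0 only; criteria checked and soft reset
 *				issued on the next pass
 *	DETECT --> PRE_POLL	other PFs; wait out PF0's reset window
 *	RESET ---> PRE_POLL	after be_soft_reset()
 *	PRE_POLL -> REINIT	returns 0 so be_err_recover() can proceed
 *
 * -EAGAIN keeps the caller rescheduling; any other error aborts recovery.
 */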
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s\n",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}
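/* Doorbell BAR selection: Lancer and all VFs expose doorbells in BAR 0;
 * BE2/BE3/Skyhawk PFs use BAR 4.
 */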
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
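/* BAR usage as mapped above:
 *
 *	CSR:	BAR 2		BEx PFs only
 *	DB:	db_bar()	BAR 0 on Lancer/VFs, BAR 4 otherwise
 *	PCICFG:	BAR 1 on BE2 PFs, BAR 0 on BE3/Skyhawk PFs; VFs reach it
 *		through the DB mapping at SRIOV_VF_PCICFG_OFFSET
 */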
static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	INIT_LIST_HEAD(&adapter->vxlan_port_list);
	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
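/* The attribute registered above surfaces through the standard hwmon sysfs
 * interface; reading it returns the die temperature in millidegrees Celsius:
 *
 *	cat /sys/class/hwmon/hwmonN/temp1_input
 *	51000
 *
 * (the "51000" value is illustrative)
 */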
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF has access to more resources.
	 * This facility is not available in BE3 FW; on Lancer, the FW does
	 * this itself.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
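/* This callback backs the standard SR-IOV sysfs knob; for example,
 *
 *	echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
 *
 * enables 4 VFs, and writing 0 disables them (provided none are assigned).
 */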
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);
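/* Example module load with a non-default RX fragment size (one of
 * 2048/4096/8192, as validated above):
 *
 *	modprobe be2net rx_frag_size=4096
 */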
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);