/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

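/* Adapter queues (rings) are backed by one block of DMA-coherent memory
 * of len * entry_size bytes; be_queue_alloc() zeroes both the queue_info
 * and the ring so producer/consumer indices start from a known state.
 */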
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

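/* Doorbell helpers: the adapter learns of new work through a packed
 * value (ring id, posted/popped count and flag bits) written to a
 * doorbell register. wmb() orders the descriptor writes in memory
 * before the MMIO write that makes them visible to the hardware.
 */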
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their
	 * MAC address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

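/* The populate_be_vN_stats() helpers below copy the FW GET_STATS
 * response (whose vN layout depends on the chip family) into the
 * driver's unified be_drv_stats counters.
 */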
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

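/* Widen a 16-bit HW counter that wraps at 65535 into a 32-bit software
 * accumulator: when the new reading is smaller than the low half of the
 * accumulator the counter must have wrapped, so carry into the high half.
 */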
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

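	/* Per-queue counters are updated from softirq context; the
	 * u64_stats fetch/retry loops give a consistent 64-bit snapshot
	 * on 32-bit hosts without locking the hot path.
	 */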
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

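/* Called on async link-state events from the FW. On the first event the
 * carrier is forced off so the stack starts from a known state; later
 * events simply track the reported link status.
 */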
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

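/* Map the skb's VLAN tag to one the adapter supports: if the OS-supplied
 * priority bits are not in the available priority bitmap, substitute the
 * FW-recommended priority bits.
 */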
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

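/* Parse the offload requests (TSO, checksum, VLAN) carried by the skb
 * into the chip-neutral be_wrb_params; wrb_fill_hdr() later encodes
 * these into the header WRB bit-fields.
 */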
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */
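/* When OS2BMC is enabled, selected management frames (ARP, DHCP,
 * NetBIOS, IPv6 ND/RA) transmitted by the host must also reach the
 * BMC. be_send_pkt_to_bmc() checks the FW-provided filter mask via the
 * macros below; be_xmit() then enqueues a matching pkt a second time
 * with the mgmt bit set in its header WRB.
 */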

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

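/* ndo_start_xmit: apply HW workarounds, map and enqueue the skb's WRBs,
 * then ring the TX doorbell. The doorbell write is deferred while
 * skb->xmit_more says the stack has more packets queued for this txq.
 */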
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

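/* VLAN promiscuous mode is the fallback whenever the 64-entry HW VLAN
 * filter table cannot hold all configured VIDs or filter programming
 * fails; see be_vid_config() below.
 */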
static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}

/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}

static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}

1602static void be_set_mc_list(struct be_adapter *adapter)
1603{
1604 struct net_device *netdev = adapter->netdev;
b7172414 1605 struct netdev_hw_addr *ha;
92fbb1df
SB
1606 bool mc_promisc = false;
1607 int status;
1608
b7172414 1609 netif_addr_lock_bh(netdev);
92fbb1df
SB
1610 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1611
1612 if (netdev->flags & IFF_PROMISC) {
1613 adapter->update_mc_list = false;
1614 } else if (netdev->flags & IFF_ALLMULTI ||
1615 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1616 /* Enable multicast promisc if num configured exceeds
1617 * what we support
1618 */
1619 mc_promisc = true;
1620 adapter->update_mc_list = false;
1621 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1622 /* Update mc-list unconditionally if the iface was previously
1623 * in mc-promisc mode and now is out of that mode.
1624 */
1625 adapter->update_mc_list = true;
1626 }
1627
b7172414
SP
1628 if (adapter->update_mc_list) {
1629 int i = 0;
1630
1631 /* cache the mc-list in adapter */
1632 netdev_for_each_mc_addr(ha, netdev) {
1633 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1634 i++;
1635 }
1636 adapter->mc_count = netdev_mc_count(netdev);
1637 }
1638 netif_addr_unlock_bh(netdev);
1639
92fbb1df 1640 if (mc_promisc) {
f66b7cfd 1641 be_set_mc_promisc(adapter);
92fbb1df
SB
1642 } else if (adapter->update_mc_list) {
1643 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1644 if (!status)
1645 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1646 else
1647 be_set_mc_promisc(adapter);
1648
1649 adapter->update_mc_list = false;
1650 }
1651}
1652
1653static void be_clear_mc_list(struct be_adapter *adapter)
1654{
1655 struct net_device *netdev = adapter->netdev;
1656
1657 __dev_mc_unsync(netdev, NULL);
1658 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
b7172414 1659 adapter->mc_count = 0;
f66b7cfd
SP
1660}
1661
988d44b1
SR
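/* Note: pmac_id[0] holds the pmac handle of the interface's primary MAC;
 * additional unicast MACs occupy slots 1..uc_macs, which is why the
 * helpers below index pmac_id[] with uc_idx + 1.
 */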
1662static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1663{
1d0f110a 1664 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
988d44b1
SR
1665 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1666 return 0;
1667 }
1668
1d0f110a 1669 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
988d44b1
SR
1670 adapter->if_handle,
1671 &adapter->pmac_id[uc_idx + 1], 0);
1672}
1673
1674static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1675{
1676 if (pmac_id == adapter->pmac_id[0])
1677 return;
1678
1679 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1680}
1681
f66b7cfd
SP
1682static void be_set_uc_list(struct be_adapter *adapter)
1683{
92fbb1df 1684 struct net_device *netdev = adapter->netdev;
f66b7cfd 1685 struct netdev_hw_addr *ha;
92fbb1df 1686 bool uc_promisc = false;
b7172414 1687 int curr_uc_macs = 0, i;
f66b7cfd 1688
b7172414 1689 netif_addr_lock_bh(netdev);
92fbb1df 1690 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
f66b7cfd 1691
92fbb1df
SB
1692 if (netdev->flags & IFF_PROMISC) {
1693 adapter->update_uc_list = false;
1694 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1695 uc_promisc = true;
1696 adapter->update_uc_list = false;
1697 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1698 /* Update uc-list unconditionally if the iface was previously
1699 * in uc-promisc mode and now is out of that mode.
1700 */
1701 adapter->update_uc_list = true;
6b7c5b94
SP
1702 }
1703
b7172414 1704 if (adapter->update_uc_list) {
b7172414 1705 /* cache the uc-list in adapter array */
6052cd1a 1706 i = 0;
b7172414
SP
1707 netdev_for_each_uc_addr(ha, netdev) {
1708 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1709 i++;
1710 }
1711 curr_uc_macs = netdev_uc_count(netdev);
1712 }
1713 netif_addr_unlock_bh(netdev);
1714
92fbb1df
SB
1715 if (uc_promisc) {
1716 be_set_uc_promisc(adapter);
1717 } else if (adapter->update_uc_list) {
1718 be_clear_uc_promisc(adapter);
1719
b7172414 1720 for (i = 0; i < adapter->uc_macs; i++)
988d44b1 1721 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
92fbb1df 1722
b7172414 1723 for (i = 0; i < curr_uc_macs; i++)
988d44b1 1724 be_uc_mac_add(adapter, i);
b7172414 1725 adapter->uc_macs = curr_uc_macs;
92fbb1df 1726 adapter->update_uc_list = false;
f66b7cfd
SP
1727 }
1728}
6b7c5b94 1729
f66b7cfd
SP
1730static void be_clear_uc_list(struct be_adapter *adapter)
1731{
92fbb1df 1732 struct net_device *netdev = adapter->netdev;
f66b7cfd 1733 int i;
fbc13f01 1734
92fbb1df 1735 __dev_uc_unsync(netdev, NULL);
b7172414 1736 for (i = 0; i < adapter->uc_macs; i++)
988d44b1
SR
1737 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1738
f66b7cfd
SP
1739 adapter->uc_macs = 0;
1740}
fbc13f01 1741
b7172414 1742static void __be_set_rx_mode(struct be_adapter *adapter)
f66b7cfd 1743{
b7172414
SP
1744 struct net_device *netdev = adapter->netdev;
1745
1746 mutex_lock(&adapter->rx_filter_lock);
fbc13f01 1747
f66b7cfd 1748 if (netdev->flags & IFF_PROMISC) {
92fbb1df
SB
1749 if (!be_in_all_promisc(adapter))
1750 be_set_all_promisc(adapter);
1751 } else if (be_in_all_promisc(adapter)) {
1752 /* We need to re-program the vlan-list or clear
1753 * vlan-promisc mode (if needed) when the interface
1754 * comes out of promisc mode.
1755 */
1756 be_vid_config(adapter);
f66b7cfd 1757 }
a0794885 1758
92fbb1df 1759 be_set_uc_list(adapter);
f66b7cfd 1760 be_set_mc_list(adapter);
b7172414
SP
1761
1762 mutex_unlock(&adapter->rx_filter_lock);
1763}
1764
1765static void be_work_set_rx_mode(struct work_struct *work)
1766{
1767 struct be_cmd_work *cmd_work =
1768 container_of(work, struct be_cmd_work, work);
1769
1770 __be_set_rx_mode(cmd_work->adapter);
1771 kfree(cmd_work);
6b7c5b94
SP
1772}
1773
ba343c77
SB
1774static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1775{
1776 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1777 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1778 int status;
1779
11ac75ed 1780 if (!sriov_enabled(adapter))
ba343c77
SB
1781 return -EPERM;
1782
11ac75ed 1783 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1784 return -EINVAL;
1785
3c31aaf3
VV
1786 /* Proceed further only if the user-provided MAC is different
1787 * from the active MAC
1788 */
1789 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1790 return 0;
1791
3175d8c2
SP
1792 if (BEx_chip(adapter)) {
1793 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1794 vf + 1);
ba343c77 1795
11ac75ed
SP
1796 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1797 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1798 } else {
1799 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1800 vf + 1);
590c391d
PR
1801 }
1802
abccf23e
KA
1803 if (status) {
1804 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1805 mac, vf, status);
1806 return be_cmd_status(status);
1807 }
64600ea5 1808
abccf23e
KA
1809 ether_addr_copy(vf_cfg->mac_addr, mac);
1810
1811 return 0;
ba343c77
SB
1812}
1813
64600ea5 1814static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1815 struct ifla_vf_info *vi)
64600ea5
AK
1816{
1817 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1818 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1819
11ac75ed 1820 if (!sriov_enabled(adapter))
64600ea5
AK
1821 return -EPERM;
1822
11ac75ed 1823 if (vf >= adapter->num_vfs)
64600ea5
AK
1824 return -EINVAL;
1825
1826 vi->vf = vf;
ed616689
SC
1827 vi->max_tx_rate = vf_cfg->tx_rate;
1828 vi->min_tx_rate = 0;
a60b3a13
AK
1829 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1830 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1831 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1832 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1833 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1834
1835 return 0;
1836}
1837
435452aa
VV
1838static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1839{
1840 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1841 u16 vids[BE_NUM_VLANS_SUPPORTED];
1842 int vf_if_id = vf_cfg->if_handle;
1843 int status;
1844
1845 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1846 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1847 if (status)
1848 return status;
1849
1850 /* Clear any pre-programmed VLAN filters on the VF when TVT is enabled */
1851 vids[0] = 0;
1852 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1853 if (!status)
1854 dev_info(&adapter->pdev->dev,
1855 "Cleared guest VLANs on VF%d", vf);
1856
1857 /* After TVT is enabled, disallow VFs from programming VLAN filters */
1858 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1859 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1860 ~BE_PRIV_FILTMGMT, vf + 1);
1861 if (!status)
1862 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1863 }
1864 return 0;
1865}
1866
1867static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1868{
1869 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1870 struct device *dev = &adapter->pdev->dev;
1871 int status;
1872
1873 /* Reset Transparent VLAN Tagging. */
1874 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1875 vf_cfg->if_handle, 0, 0);
435452aa
VV
1876 if (status)
1877 return status;
1878
1879 /* Allow VFs to program VLAN filtering */
1880 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1881 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1882 BE_PRIV_FILTMGMT, vf + 1);
1883 if (!status) {
1884 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1885 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1886 }
1887 }
1888
1889 dev_info(dev,
1890 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1891 return 0;
1892}
1893
79aab093
MS
1894static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1895 __be16 vlan_proto)
1da87b7f
AK
1896{
1897 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1898 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1899 int status;
1da87b7f 1900
11ac75ed 1901 if (!sriov_enabled(adapter))
1da87b7f
AK
1902 return -EPERM;
1903
b9fc0e53 1904 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1905 return -EINVAL;
1906
79aab093
MS
1907 if (vlan_proto != htons(ETH_P_8021Q))
1908 return -EPROTONOSUPPORT;
1909
b9fc0e53
AK
1910 if (vlan || qos) {
1911 vlan |= qos << VLAN_PRIO_SHIFT;
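 /* The 3-bit qos now sits in bits 15:13 of the TCI (VLAN_PRIO_SHIFT
 * is 13), above the 12-bit VID; e.g. vlan = 100, qos = 5 yields
 * (5 << 13) | 100 = 0xa064.
 */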
435452aa 1912 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1913 } else {
435452aa 1914 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1915 }
1916
abccf23e
KA
1917 if (status) {
1918 dev_err(&adapter->pdev->dev,
435452aa
VV
1919 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1920 status);
abccf23e
KA
1921 return be_cmd_status(status);
1922 }
1923
1924 vf_cfg->vlan_tag = vlan;
abccf23e 1925 return 0;
1da87b7f
AK
1926}
1927
ed616689
SC
1928static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1929 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1930{
1931 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1932 struct device *dev = &adapter->pdev->dev;
1933 int percent_rate, status = 0;
1934 u16 link_speed = 0;
1935 u8 link_status;
e1d18735 1936
11ac75ed 1937 if (!sriov_enabled(adapter))
e1d18735
AK
1938 return -EPERM;
1939
94f434c2 1940 if (vf >= adapter->num_vfs)
e1d18735
AK
1941 return -EINVAL;
1942
ed616689
SC
1943 if (min_tx_rate)
1944 return -EINVAL;
1945
0f77ba73
RN
1946 if (!max_tx_rate)
1947 goto config_qos;
1948
1949 status = be_cmd_link_status_query(adapter, &link_speed,
1950 &link_status, 0);
1951 if (status)
1952 goto err;
1953
1954 if (!link_status) {
1955 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1956 status = -ENETDOWN;
0f77ba73
RN
1957 goto err;
1958 }
1959
1960 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1961 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1962 link_speed);
1963 status = -EINVAL;
1964 goto err;
1965 }
1966
1967 /* On Skyhawk the QOS setting must be done only as a % value */
1968 percent_rate = link_speed / 100;
1969 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1970 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1971 percent_rate);
1972 status = -EINVAL;
1973 goto err;
94f434c2 1974 }
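 /* Example: on a 10GbE link be_cmd_link_status_query() reports
 * link_speed = 10000 (Mbps), so percent_rate = 100; a max_tx_rate of
 * 2500 (an exact 25%) passes the Skyhawk check above, while 2550
 * fails as it is not a whole multiple of 1% of the link speed.
 */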
e1d18735 1975
0f77ba73
RN
1976config_qos:
1977 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1978 if (status)
0f77ba73
RN
1979 goto err;
1980
1981 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1982 return 0;
1983
1984err:
1985 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1986 max_tx_rate, vf);
abccf23e 1987 return be_cmd_status(status);
e1d18735 1988}
e2fb1afa 1989
bdce2ad7
SR
1990static int be_set_vf_link_state(struct net_device *netdev, int vf,
1991 int link_state)
1992{
1993 struct be_adapter *adapter = netdev_priv(netdev);
1994 int status;
1995
1996 if (!sriov_enabled(adapter))
1997 return -EPERM;
1998
1999 if (vf >= adapter->num_vfs)
2000 return -EINVAL;
2001
2002 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
2003 if (status) {
2004 dev_err(&adapter->pdev->dev,
2005 "Link state change on VF %d failed: %#x\n", vf, status);
2006 return be_cmd_status(status);
2007 }
bdce2ad7 2008
abccf23e
KA
2009 adapter->vf_cfg[vf].plink_tracking = link_state;
2010
2011 return 0;
bdce2ad7 2012}
e1d18735 2013
e7bcbd7b
KA
2014static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2015{
2016 struct be_adapter *adapter = netdev_priv(netdev);
2017 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2018 u8 spoofchk;
2019 int status;
2020
2021 if (!sriov_enabled(adapter))
2022 return -EPERM;
2023
2024 if (vf >= adapter->num_vfs)
2025 return -EINVAL;
2026
2027 if (BEx_chip(adapter))
2028 return -EOPNOTSUPP;
2029
2030 if (enable == vf_cfg->spoofchk)
2031 return 0;
2032
2033 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2034
2035 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2036 0, spoofchk);
2037 if (status) {
2038 dev_err(&adapter->pdev->dev,
2039 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2040 return be_cmd_status(status);
2041 }
2042
2043 vf_cfg->spoofchk = enable;
2044 return 0;
2045}
2046
2632bafd
SP
2047static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2048 ulong now)
6b7c5b94 2049{
2632bafd
SP
2050 aic->rx_pkts_prev = rx_pkts;
2051 aic->tx_reqs_prev = tx_pkts;
2052 aic->jiffies = now;
2053}
ac124ff9 2054
20947770 2055static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 2056{
20947770
PR
2057 struct be_adapter *adapter = eqo->adapter;
2058 int eqd, start;
2632bafd 2059 struct be_aic_obj *aic;
2632bafd
SP
2060 struct be_rx_obj *rxo;
2061 struct be_tx_obj *txo;
20947770 2062 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
2063 ulong now;
2064 u32 pps, delta;
20947770 2065 int i;
10ef9ab4 2066
20947770
PR
2067 aic = &adapter->aic_obj[eqo->idx];
2068 if (!aic->enable) {
2069 if (aic->jiffies)
2070 aic->jiffies = 0;
2071 eqd = aic->et_eqd;
2072 return eqd;
2073 }
6b7c5b94 2074
20947770 2075 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 2076 do {
57a7744e 2077 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 2078 rx_pkts += rxo->stats.rx_pkts;
57a7744e 2079 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 2080 }
10ef9ab4 2081
20947770 2082 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 2083 do {
57a7744e 2084 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 2085 tx_pkts += txo->stats.tx_reqs;
57a7744e 2086 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 2087 }
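 /* The u64_stats fetch_begin/fetch_retry pairs above detect a
 * concurrent writer and retry the read, so the 64-bit counters are
 * sampled consistently even on 32-bit hosts where such loads are not
 * atomic.
 */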
6b7c5b94 2088
20947770
PR
2089 /* Skip if the counters wrapped around, or on the first calculation */
2090 now = jiffies;
2091 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2092 rx_pkts < aic->rx_pkts_prev ||
2093 tx_pkts < aic->tx_reqs_prev) {
2094 be_aic_update(aic, rx_pkts, tx_pkts, now);
2095 return aic->prev_eqd;
2096 }
2632bafd 2097
20947770
PR
2098 delta = jiffies_to_msecs(now - aic->jiffies);
2099 if (delta == 0)
2100 return aic->prev_eqd;
10ef9ab4 2101
20947770
PR
2102 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2103 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2104 eqd = (pps / 15000) << 2;
2632bafd 2105
20947770
PR
2106 if (eqd < 8)
2107 eqd = 0;
2108 eqd = min_t(u32, eqd, aic->max_eqd);
2109 eqd = max_t(u32, eqd, aic->min_eqd);
2110
2111 be_aic_update(aic, rx_pkts, tx_pkts, now);
2112
2113 return eqd;
2114}
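/* Worked example: at a combined 300,000 pkts/s since the last sample,
 * eqd = (300000 / 15000) << 2 = 80. Below 30,000 pkts/s the formula
 * yields eqd < 8, which is rounded down to 0 (no interrupt delay at low
 * rates) before the clamp to the [min_eqd, max_eqd] window.
 */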
2115
2116/* For Skyhawk-R only */
2117static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2118{
2119 struct be_adapter *adapter = eqo->adapter;
2120 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2121 ulong now = jiffies;
2122 int eqd;
2123 u32 mult_enc;
2124
2125 if (!aic->enable)
2126 return 0;
2127
3c0d49aa 2128 if (jiffies_to_msecs(now - aic->jiffies) < 1)
20947770
PR
2129 eqd = aic->prev_eqd;
2130 else
2131 eqd = be_get_new_eqd(eqo);
2132
2133 if (eqd > 100)
2134 mult_enc = R2I_DLY_ENC_1;
2135 else if (eqd > 60)
2136 mult_enc = R2I_DLY_ENC_2;
2137 else if (eqd > 20)
2138 mult_enc = R2I_DLY_ENC_3;
2139 else
2140 mult_enc = R2I_DLY_ENC_0;
2141
2142 aic->prev_eqd = eqd;
2143
2144 return mult_enc;
2145}
2146
2147void be_eqd_update(struct be_adapter *adapter, bool force_update)
2148{
2149 struct be_set_eqd set_eqd[MAX_EVT_QS];
2150 struct be_aic_obj *aic;
2151 struct be_eq_obj *eqo;
2152 int i, num = 0, eqd;
2153
2154 for_all_evt_queues(adapter, eqo, i) {
2155 aic = &adapter->aic_obj[eqo->idx];
2156 eqd = be_get_new_eqd(eqo);
2157 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
2158 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2159 set_eqd[num].eq_id = eqo->q.id;
2160 aic->prev_eqd = eqd;
2161 num++;
2162 }
ac124ff9 2163 }
2632bafd
SP
2164
2165 if (num)
2166 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
2167}
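/* The firmware takes the EQ delay as a multiplier rather than raw
 * usecs; (eqd * 65) / 100 is the driver's conversion, so e.g. an eqd of
 * 80 is programmed as a delay_multiplier of 52.
 */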
2168
3abcdeda 2169static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 2170 struct be_rx_compl_info *rxcp)
4097f663 2171{
ac124ff9 2172 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 2173
ab1594e9 2174 u64_stats_update_begin(&stats->sync);
3abcdeda 2175 stats->rx_compl++;
2e588f84 2176 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 2177 stats->rx_pkts++;
8670f2a5
SB
2178 if (rxcp->tunneled)
2179 stats->rx_vxlan_offload_pkts++;
2e588f84 2180 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 2181 stats->rx_mcast_pkts++;
2e588f84 2182 if (rxcp->err)
ac124ff9 2183 stats->rx_compl_err++;
ab1594e9 2184 u64_stats_update_end(&stats->sync);
4097f663
SP
2185}
2186
2e588f84 2187static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 2188{
19fad86f 2189 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
2190 * Also ignore ipcksm for ipv6 pkts
2191 */
2e588f84 2192 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 2193 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
2194}
2195
0b0ef1d0 2196static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 2197{
10ef9ab4 2198 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2199 struct be_rx_page_info *rx_page_info;
3abcdeda 2200 struct be_queue_info *rxq = &rxo->q;
b0fd2eb2 2201 u32 frag_idx = rxq->tail;
6b7c5b94 2202
3abcdeda 2203 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
2204 BUG_ON(!rx_page_info->page);
2205
e50287be 2206 if (rx_page_info->last_frag) {
2b7bcebf
IV
2207 dma_unmap_page(&adapter->pdev->dev,
2208 dma_unmap_addr(rx_page_info, bus),
2209 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2210 rx_page_info->last_frag = false;
2211 } else {
2212 dma_sync_single_for_cpu(&adapter->pdev->dev,
2213 dma_unmap_addr(rx_page_info, bus),
2214 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2215 }
6b7c5b94 2216
0b0ef1d0 2217 queue_tail_inc(rxq);
6b7c5b94
SP
2218 atomic_dec(&rxq->used);
2219 return rx_page_info;
2220}
2221
2222/* Throw away the data in the Rx completion */
10ef9ab4
SP
2223static void be_rx_compl_discard(struct be_rx_obj *rxo,
2224 struct be_rx_compl_info *rxcp)
6b7c5b94 2225{
6b7c5b94 2226 struct be_rx_page_info *page_info;
2e588f84 2227 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2228
e80d9da6 2229 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2230 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2231 put_page(page_info->page);
2232 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2233 }
2234}
2235
2236/*
2237 * skb_fill_rx_data forms a complete skb for an ether frame
2238 * indicated by rxcp.
2239 */
10ef9ab4
SP
2240static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2241 struct be_rx_compl_info *rxcp)
6b7c5b94 2242{
6b7c5b94 2243 struct be_rx_page_info *page_info;
2e588f84
SP
2244 u16 i, j;
2245 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2246 u8 *start;
6b7c5b94 2247
0b0ef1d0 2248 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2249 start = page_address(page_info->page) + page_info->page_offset;
2250 prefetch(start);
2251
2252 /* Copy data in the first descriptor of this completion */
2e588f84 2253 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2254
6b7c5b94
SP
2255 skb->len = curr_frag_len;
2256 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2257 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2258 /* Complete packet has now been moved to data */
2259 put_page(page_info->page);
2260 skb->data_len = 0;
2261 skb->tail += curr_frag_len;
2262 } else {
ac1ae5f3
ED
2263 hdr_len = ETH_HLEN;
2264 memcpy(skb->data, start, hdr_len);
6b7c5b94 2265 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2266 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2267 skb_shinfo(skb)->frags[0].page_offset =
2268 page_info->page_offset + hdr_len;
748b539a
SP
2269 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2270 curr_frag_len - hdr_len);
6b7c5b94 2271 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2272 skb->truesize += rx_frag_size;
6b7c5b94
SP
2273 skb->tail += hdr_len;
2274 }
205859a2 2275 page_info->page = NULL;
6b7c5b94 2276
2e588f84
SP
2277 if (rxcp->pkt_size <= rx_frag_size) {
2278 BUG_ON(rxcp->num_rcvd != 1);
2279 return;
6b7c5b94
SP
2280 }
2281
2282 /* More frags present for this completion */
2e588f84
SP
2283 remaining = rxcp->pkt_size - curr_frag_len;
2284 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2285 page_info = get_rx_page_info(rxo);
2e588f84 2286 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2287
bd46cb6c
AK
2288 /* Coalesce all frags from the same physical page in one slot */
2289 if (page_info->page_offset == 0) {
2290 /* Fresh page */
2291 j++;
b061b39e 2292 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2293 skb_shinfo(skb)->frags[j].page_offset =
2294 page_info->page_offset;
9e903e08 2295 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2296 skb_shinfo(skb)->nr_frags++;
2297 } else {
2298 put_page(page_info->page);
2299 }
2300
9e903e08 2301 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2302 skb->len += curr_frag_len;
2303 skb->data_len += curr_frag_len;
bdb28a97 2304 skb->truesize += rx_frag_size;
2e588f84 2305 remaining -= curr_frag_len;
205859a2 2306 page_info->page = NULL;
6b7c5b94 2307 }
bd46cb6c 2308 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2309}
2310
5be93b9a 2311/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2312static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2313 struct be_rx_compl_info *rxcp)
6b7c5b94 2314{
10ef9ab4 2315 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2316 struct net_device *netdev = adapter->netdev;
6b7c5b94 2317 struct sk_buff *skb;
89420424 2318
bb349bb4 2319 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2320 if (unlikely(!skb)) {
ac124ff9 2321 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2322 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2323 return;
2324 }
2325
10ef9ab4 2326 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2327
6332c8d3 2328 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2329 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2330 else
2331 skb_checksum_none_assert(skb);
6b7c5b94 2332
6332c8d3 2333 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2334 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2335 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2336 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2337
b6c0e89d 2338 skb->csum_level = rxcp->tunneled;
6384a4d0 2339 skb_mark_napi_id(skb, napi);
6b7c5b94 2340
343e43c0 2341 if (rxcp->vlanf)
86a9bad3 2342 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2343
2344 netif_receive_skb(skb);
6b7c5b94
SP
2345}
2346
5be93b9a 2347/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2348static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2349 struct napi_struct *napi,
2350 struct be_rx_compl_info *rxcp)
6b7c5b94 2351{
10ef9ab4 2352 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2353 struct be_rx_page_info *page_info;
5be93b9a 2354 struct sk_buff *skb = NULL;
2e588f84
SP
2355 u16 remaining, curr_frag_len;
2356 u16 i, j;
3968fa1e 2357
10ef9ab4 2358 skb = napi_get_frags(napi);
5be93b9a 2359 if (!skb) {
10ef9ab4 2360 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2361 return;
2362 }
2363
2e588f84
SP
2364 remaining = rxcp->pkt_size;
2365 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2366 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2367
2368 curr_frag_len = min(remaining, rx_frag_size);
2369
bd46cb6c
AK
2370 /* Coalesce all frags from the same physical page in one slot */
2371 if (i == 0 || page_info->page_offset == 0) {
2372 /* First frag or Fresh page */
2373 j++;
b061b39e 2374 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2375 skb_shinfo(skb)->frags[j].page_offset =
2376 page_info->page_offset;
9e903e08 2377 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2378 } else {
2379 put_page(page_info->page);
2380 }
9e903e08 2381 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2382 skb->truesize += rx_frag_size;
bd46cb6c 2383 remaining -= curr_frag_len;
6b7c5b94
SP
2384 memset(page_info, 0, sizeof(*page_info));
2385 }
bd46cb6c 2386 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2387
5be93b9a 2388 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2389 skb->len = rxcp->pkt_size;
2390 skb->data_len = rxcp->pkt_size;
5be93b9a 2391 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2392 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2393 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2394 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2395
b6c0e89d 2396 skb->csum_level = rxcp->tunneled;
5be93b9a 2397
343e43c0 2398 if (rxcp->vlanf)
86a9bad3 2399 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2400
10ef9ab4 2401 napi_gro_frags(napi);
2e588f84
SP
2402}
2403
10ef9ab4
SP
2404static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2405 struct be_rx_compl_info *rxcp)
2e588f84 2406{
c3c18bc1
SP
2407 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2408 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2409 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2410 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2411 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2412 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2413 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2414 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2415 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2416 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2417 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2418 if (rxcp->vlanf) {
c3c18bc1
SP
2419 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2420 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2421 }
c3c18bc1 2422 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2423 rxcp->tunneled =
c3c18bc1 2424 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2425}
2426
10ef9ab4
SP
2427static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2428 struct be_rx_compl_info *rxcp)
2e588f84 2429{
c3c18bc1
SP
2430 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2431 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2432 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2433 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2434 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2435 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2436 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2437 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2438 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2439 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2440 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2441 if (rxcp->vlanf) {
c3c18bc1
SP
2442 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2443 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2444 }
c3c18bc1
SP
2445 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2446 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2447}
2448
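/* The valid bit doubles as the producer/consumer handshake: HW sets it
 * last when writing a completion, so the driver tests it first, issues
 * rmb() before reading the other dwords, and zeroes it after parsing so
 * a recycled CQ slot is never mistaken for a fresh completion.
 */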
2449static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2450{
2451 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2452 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2453 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2454
2e588f84
SP
2455 /* For checking the valid bit it is OK to use either definition, as the
2456 * valid bit is at the same position in both v0 and v1 Rx compl */
2457 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2458 return NULL;
6b7c5b94 2459
2e588f84
SP
2460 rmb();
2461 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2462
2e588f84 2463 if (adapter->be3_native)
10ef9ab4 2464 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2465 else
10ef9ab4 2466 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2467
e38b1706
SK
2468 if (rxcp->ip_frag)
2469 rxcp->l4_csum = 0;
2470
15d72184 2471 if (rxcp->vlanf) {
f93f160b
VV
2472 /* In QNQ modes, if qnq bit is not set, then the packet was
2473 * tagged only with the transparent outer vlan-tag and must
2474 * not be treated as a vlan packet by host
2475 */
2476 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2477 rxcp->vlanf = 0;
6b7c5b94 2478
15d72184 2479 if (!lancer_chip(adapter))
3c709f8f 2480 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2481
939cf306 2482 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2483 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2484 rxcp->vlanf = 0;
2485 }
2e588f84
SP
2486
2487 /* As the compl has been parsed, reset it; we won't touch it again */
2488 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2489
3abcdeda 2490 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2491 return rxcp;
2492}
2493
1829b086 2494static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2495{
6b7c5b94 2496 u32 order = get_order(size);
1829b086 2497
6b7c5b94 2498 if (order > 0)
1829b086
ED
2499 gfp |= __GFP_COMP;
2500 return alloc_pages(gfp, order);
6b7c5b94
SP
2501}
2502
2503/*
2504 * Allocate a page, split it to fragments of size rx_frag_size and post as
2505 * receive buffers to BE
2506 */
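/* Example of the split: with the default rx_frag_size of 2048 and 4K
 * pages, big_page_size = (1 << get_order(2048)) * PAGE_SIZE = 4096, so
 * each page yields two 2K fragments; every fragment after the first
 * takes an extra get_page() reference on the same page.
 */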
c30d7266 2507static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2508{
3abcdeda 2509 struct be_adapter *adapter = rxo->adapter;
26d92f92 2510 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2511 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2512 struct page *pagep = NULL;
ba42fad0 2513 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2514 struct be_eth_rx_d *rxd;
2515 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2516 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2517
3abcdeda 2518 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2519 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2520 if (!pagep) {
1829b086 2521 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2522 if (unlikely(!pagep)) {
ac124ff9 2523 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2524 break;
2525 }
ba42fad0
IV
2526 page_dmaaddr = dma_map_page(dev, pagep, 0,
2527 adapter->big_page_size,
2b7bcebf 2528 DMA_FROM_DEVICE);
ba42fad0
IV
2529 if (dma_mapping_error(dev, page_dmaaddr)) {
2530 put_page(pagep);
2531 pagep = NULL;
d3de1540 2532 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2533 break;
2534 }
e50287be 2535 page_offset = 0;
6b7c5b94
SP
2536 } else {
2537 get_page(pagep);
e50287be 2538 page_offset += rx_frag_size;
6b7c5b94 2539 }
e50287be 2540 page_info->page_offset = page_offset;
6b7c5b94 2541 page_info->page = pagep;
6b7c5b94
SP
2542
2543 rxd = queue_head_node(rxq);
e50287be 2544 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2545 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2546 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2547
2548 /* Any space left in the current big page for another frag? */
2549 if ((page_offset + rx_frag_size + rx_frag_size) >
2550 adapter->big_page_size) {
2551 pagep = NULL;
e50287be
SP
2552 page_info->last_frag = true;
2553 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2554 } else {
2555 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2556 }
26d92f92
SP
2557
2558 prev_page_info = page_info;
2559 queue_head_inc(rxq);
10ef9ab4 2560 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2561 }
e50287be
SP
2562
2563 /* Mark the last frag of a page when we break out of the above loop
2564 * with no more slots available in the RXQ
2565 */
2566 if (pagep) {
2567 prev_page_info->last_frag = true;
2568 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2569 }
6b7c5b94
SP
2570
2571 if (posted) {
6b7c5b94 2572 atomic_add(posted, &rxq->used);
6384a4d0
SP
2573 if (rxo->rx_post_starved)
2574 rxo->rx_post_starved = false;
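 /* The ERX doorbell accepts at most MAX_NUM_POST_ERX_DB buffers per
 * write, so a large batch is notified to HW in chunks.
 */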
c30d7266 2575 do {
69304cc9 2576 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2577 be_rxq_notify(adapter, rxq->id, notify);
2578 posted -= notify;
2579 } while (posted);
ea1dae11
SP
2580 } else if (atomic_read(&rxq->used) == 0) {
2581 /* Let be_worker replenish when memory is available */
3abcdeda 2582 rxo->rx_post_starved = true;
6b7c5b94 2583 }
6b7c5b94
SP
2584}
2585
152ffe5b 2586static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2587{
152ffe5b
SB
2588 struct be_queue_info *tx_cq = &txo->cq;
2589 struct be_tx_compl_info *txcp = &txo->txcp;
2590 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2591
152ffe5b 2592 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2593 return NULL;
2594
152ffe5b 2595 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2596 rmb();
152ffe5b 2597 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2598
152ffe5b
SB
2599 txcp->status = GET_TX_COMPL_BITS(status, compl);
2600 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2601
152ffe5b 2602 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2603 queue_tail_inc(tx_cq);
2604 return txcp;
2605}
2606
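/* A TX req occupies one hdr wrb followed by its frag wrbs; the skb is
 * anchored in sent_skb_list[] at the hdr-wrb index. The walk below frees
 * an skb whenever it lands on a non-NULL slot, skips that hdr wrb, and
 * unmaps every frag wrb up to and including last_index.
 */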
3c8def97 2607static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2608 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2609{
5f07b3c5 2610 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2611 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2612 struct sk_buff *skb = NULL;
2613 bool unmap_skb_hdr = false;
a73b796e 2614 struct be_eth_wrb *wrb;
b0fd2eb2 2615 u16 num_wrbs = 0;
2616 u32 frag_index;
6b7c5b94 2617
ec43b1a6 2618 do {
5f07b3c5
SP
2619 if (sent_skbs[txq->tail]) {
2620 /* Free skb from prev req */
2621 if (skb)
2622 dev_consume_skb_any(skb);
2623 skb = sent_skbs[txq->tail];
2624 sent_skbs[txq->tail] = NULL;
2625 queue_tail_inc(txq); /* skip hdr wrb */
2626 num_wrbs++;
2627 unmap_skb_hdr = true;
2628 }
a73b796e 2629 wrb = queue_tail_node(txq);
5f07b3c5 2630 frag_index = txq->tail;
2b7bcebf 2631 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2632 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2633 unmap_skb_hdr = false;
6b7c5b94 2634 queue_tail_inc(txq);
5f07b3c5
SP
2635 num_wrbs++;
2636 } while (frag_index != last_index);
2637 dev_consume_skb_any(skb);
6b7c5b94 2638
4d586b82 2639 return num_wrbs;
6b7c5b94
SP
2640}
2641
10ef9ab4
SP
2642/* Return the number of events in the event queue */
2643static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2644{
10ef9ab4
SP
2645 struct be_eq_entry *eqe;
2646 int num = 0;
859b1e4e 2647
10ef9ab4
SP
2648 do {
2649 eqe = queue_tail_node(&eqo->q);
2650 if (eqe->evt == 0)
2651 break;
859b1e4e 2652
10ef9ab4
SP
2653 rmb();
2654 eqe->evt = 0;
2655 num++;
2656 queue_tail_inc(&eqo->q);
2657 } while (true);
2658
2659 return num;
859b1e4e
SP
2660}
2661
10ef9ab4
SP
2662/* Leaves the EQ in disarmed state */
2663static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2664{
10ef9ab4 2665 int num = events_get(eqo);
859b1e4e 2666
20947770 2667 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2668}
2669
99b44304
KA
2670/* Free posted rx buffers that were not used */
2671static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2672{
3abcdeda 2673 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2674 struct be_rx_page_info *page_info;
2675
2676 while (atomic_read(&rxq->used) > 0) {
2677 page_info = get_rx_page_info(rxo);
2678 put_page(page_info->page);
2679 memset(page_info, 0, sizeof(*page_info));
2680 }
2681 BUG_ON(atomic_read(&rxq->used));
2682 rxq->tail = 0;
2683 rxq->head = 0;
2684}
2685
2686static void be_rx_cq_clean(struct be_rx_obj *rxo)
2687{
3abcdeda 2688 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2689 struct be_rx_compl_info *rxcp;
d23e946c
SP
2690 struct be_adapter *adapter = rxo->adapter;
2691 int flush_wait = 0;
6b7c5b94 2692
d23e946c
SP
2693 /* Consume pending rx completions.
2694 * Wait for the flush completion (identified by zero num_rcvd)
2695 * to arrive. Notify CQ even when there are no more CQ entries
2696 * for HW to flush partially coalesced CQ entries.
2697 * In Lancer, there is no need to wait for flush compl.
2698 */
2699 for (;;) {
2700 rxcp = be_rx_compl_get(rxo);
ddf1169f 2701 if (!rxcp) {
d23e946c
SP
2702 if (lancer_chip(adapter))
2703 break;
2704
954f6825
VD
2705 if (flush_wait++ > 50 ||
2706 be_check_error(adapter,
2707 BE_ERROR_HW)) {
d23e946c
SP
2708 dev_warn(&adapter->pdev->dev,
2709 "did not receive flush compl\n");
2710 break;
2711 }
2712 be_cq_notify(adapter, rx_cq->id, true, 0);
2713 mdelay(1);
2714 } else {
2715 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2716 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2717 if (rxcp->num_rcvd == 0)
2718 break;
2719 }
6b7c5b94
SP
2720 }
2721
d23e946c
SP
2722 /* After cleanup, leave the CQ in unarmed state */
2723 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2724}
2725
0ae57bb3 2726static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2727{
5f07b3c5 2728 struct device *dev = &adapter->pdev->dev;
b0fd2eb2 2729 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
152ffe5b 2730 struct be_tx_compl_info *txcp;
0ae57bb3 2731 struct be_queue_info *txq;
b0fd2eb2 2732 u32 end_idx, notified_idx;
152ffe5b 2733 struct be_tx_obj *txo;
0ae57bb3 2734 int i, pending_txqs;
a8e9179a 2735
1a3d0717 2736 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2737 do {
0ae57bb3
SP
2738 pending_txqs = adapter->num_tx_qs;
2739
2740 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2741 cmpl = 0;
2742 num_wrbs = 0;
0ae57bb3 2743 txq = &txo->q;
152ffe5b
SB
2744 while ((txcp = be_tx_compl_get(txo))) {
2745 num_wrbs +=
2746 be_tx_compl_process(adapter, txo,
2747 txcp->end_index);
0ae57bb3
SP
2748 cmpl++;
2749 }
2750 if (cmpl) {
2751 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2752 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2753 timeo = 0;
0ae57bb3 2754 }
cf5671e6 2755 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2756 pending_txqs--;
a8e9179a
SP
2757 }
2758
954f6825
VD
2759 if (pending_txqs == 0 || ++timeo > 10 ||
2760 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2761 break;
2762
2763 mdelay(1);
2764 } while (true);
2765
5f07b3c5 2766 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2767 for_all_tx_queues(adapter, txo, i) {
2768 txq = &txo->q;
0ae57bb3 2769
5f07b3c5
SP
2770 if (atomic_read(&txq->used)) {
2771 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2772 i, atomic_read(&txq->used));
2773 notified_idx = txq->tail;
0ae57bb3 2774 end_idx = txq->tail;
5f07b3c5
SP
2775 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2776 txq->len);
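 /* end_idx now points at the last pending wrb; index_adv() advances
 * the index modulo the ring size, so e.g. tail = 250, used = 10,
 * len = 256 wraps end_idx to 3.
 */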
2777 /* Use the tx-compl process logic to handle requests
2778 * that were not sent to the HW.
2779 */
0ae57bb3
SP
2780 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2781 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2782 BUG_ON(atomic_read(&txq->used));
2783 txo->pend_wrb_cnt = 0;
2784 /* Since hw was never notified of these requests,
2785 * reset TXQ indices
2786 */
2787 txq->head = notified_idx;
2788 txq->tail = notified_idx;
0ae57bb3 2789 }
b03388d6 2790 }
6b7c5b94
SP
2791}
2792
10ef9ab4
SP
2793static void be_evt_queues_destroy(struct be_adapter *adapter)
2794{
2795 struct be_eq_obj *eqo;
2796 int i;
2797
2798 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2799 if (eqo->q.created) {
2800 be_eq_clean(eqo);
10ef9ab4 2801 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 2802 netif_napi_del(&eqo->napi);
649886a3 2803 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2804 }
10ef9ab4
SP
2805 be_queue_free(adapter, &eqo->q);
2806 }
2807}
2808
2809static int be_evt_queues_create(struct be_adapter *adapter)
2810{
2811 struct be_queue_info *eq;
2812 struct be_eq_obj *eqo;
2632bafd 2813 struct be_aic_obj *aic;
10ef9ab4
SP
2814 int i, rc;
2815
e261768e 2816 /* need enough EQs to service both RX and TX queues */
92bf14ab 2817 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
e261768e
SP
2818 max(adapter->cfg_num_rx_irqs,
2819 adapter->cfg_num_tx_irqs));
10ef9ab4
SP
2820
2821 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2822 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2823
2632bafd 2824 aic = &adapter->aic_obj[i];
10ef9ab4 2825 eqo->adapter = adapter;
10ef9ab4 2826 eqo->idx = i;
2632bafd
SP
2827 aic->max_eqd = BE_MAX_EQD;
2828 aic->enable = true;
10ef9ab4
SP
2829
2830 eq = &eqo->q;
2831 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2832 sizeof(struct be_eq_entry));
10ef9ab4
SP
2833 if (rc)
2834 return rc;
2835
f2f781a7 2836 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2837 if (rc)
2838 return rc;
649886a3
KA
2839
2840 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2841 return -ENOMEM;
2842 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2843 eqo->affinity_mask);
2844 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2845 BE_NAPI_WEIGHT);
10ef9ab4 2846 }
1cfafab9 2847 return 0;
10ef9ab4
SP
2848}
2849
5fb379ee
SP
2850static void be_mcc_queues_destroy(struct be_adapter *adapter)
2851{
2852 struct be_queue_info *q;
5fb379ee 2853
8788fdc2 2854 q = &adapter->mcc_obj.q;
5fb379ee 2855 if (q->created)
8788fdc2 2856 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2857 be_queue_free(adapter, q);
2858
8788fdc2 2859 q = &adapter->mcc_obj.cq;
5fb379ee 2860 if (q->created)
8788fdc2 2861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2862 be_queue_free(adapter, q);
2863}
2864
2865/* Must be called only after TX qs are created as MCC shares TX EQ */
2866static int be_mcc_queues_create(struct be_adapter *adapter)
2867{
2868 struct be_queue_info *q, *cq;
5fb379ee 2869
8788fdc2 2870 cq = &adapter->mcc_obj.cq;
5fb379ee 2871 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2872 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2873 goto err;
2874
10ef9ab4
SP
2875 /* Use the default EQ for MCC completions */
2876 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2877 goto mcc_cq_free;
2878
8788fdc2 2879 q = &adapter->mcc_obj.q;
5fb379ee
SP
2880 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2881 goto mcc_cq_destroy;
2882
8788fdc2 2883 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2884 goto mcc_q_free;
2885
2886 return 0;
2887
2888mcc_q_free:
2889 be_queue_free(adapter, q);
2890mcc_cq_destroy:
8788fdc2 2891 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2892mcc_cq_free:
2893 be_queue_free(adapter, cq);
2894err:
2895 return -1;
2896}
2897
6b7c5b94
SP
2898static void be_tx_queues_destroy(struct be_adapter *adapter)
2899{
2900 struct be_queue_info *q;
3c8def97
SP
2901 struct be_tx_obj *txo;
2902 u8 i;
6b7c5b94 2903
3c8def97
SP
2904 for_all_tx_queues(adapter, txo, i) {
2905 q = &txo->q;
2906 if (q->created)
2907 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2908 be_queue_free(adapter, q);
6b7c5b94 2909
3c8def97
SP
2910 q = &txo->cq;
2911 if (q->created)
2912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2913 be_queue_free(adapter, q);
2914 }
6b7c5b94
SP
2915}
2916
7707133c 2917static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2918{
73f394e6 2919 struct be_queue_info *cq;
3c8def97 2920 struct be_tx_obj *txo;
73f394e6 2921 struct be_eq_obj *eqo;
92bf14ab 2922 int status, i;
6b7c5b94 2923
e261768e 2924 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
dafc0fe3 2925
10ef9ab4
SP
2926 for_all_tx_queues(adapter, txo, i) {
2927 cq = &txo->cq;
2928 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2929 sizeof(struct be_eth_tx_compl));
2930 if (status)
2931 return status;
3c8def97 2932
827da44c
JS
2933 u64_stats_init(&txo->stats.sync);
2934 u64_stats_init(&txo->stats.sync_compl);
2935
10ef9ab4
SP
2936 /* If num_evt_qs is less than num_tx_qs, then more than
2937 * one txq shares an eq
2938 */
73f394e6
SP
2939 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2940 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2941 if (status)
2942 return status;
6b7c5b94 2943
10ef9ab4
SP
2944 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2945 sizeof(struct be_eth_wrb));
2946 if (status)
2947 return status;
6b7c5b94 2948
94d73aaa 2949 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2950 if (status)
2951 return status;
73f394e6
SP
2952
2953 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2954 eqo->idx);
3c8def97 2955 }
6b7c5b94 2956
d379142b
SP
2957 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2958 adapter->num_tx_qs);
10ef9ab4 2959 return 0;
6b7c5b94
SP
2960}
2961
10ef9ab4 2962static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2963{
2964 struct be_queue_info *q;
3abcdeda
SP
2965 struct be_rx_obj *rxo;
2966 int i;
2967
2968 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2969 q = &rxo->cq;
2970 if (q->created)
2971 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2972 be_queue_free(adapter, q);
ac6a0c4a
SP
2973 }
2974}
2975
10ef9ab4 2976static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2977{
10ef9ab4 2978 struct be_queue_info *eq, *cq;
3abcdeda
SP
2979 struct be_rx_obj *rxo;
2980 int rc, i;
6b7c5b94 2981
e261768e
SP
2982 adapter->num_rss_qs =
2983 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
92bf14ab 2984
71bb8bd0 2985 /* We'll use RSS only if at least 2 RSS rings are supported. */
e261768e 2986 if (adapter->num_rss_qs < 2)
71bb8bd0
VV
2987 adapter->num_rss_qs = 0;
2988
2989 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2990
2991 /* When the interface is not capable of RSS rings (and there is no
2992 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2993 */
71bb8bd0
VV
2994 if (adapter->num_rx_qs == 0)
2995 adapter->num_rx_qs = 1;
92bf14ab 2996
6b7c5b94 2997 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2998 for_all_rx_queues(adapter, rxo, i) {
2999 rxo->adapter = adapter;
3abcdeda
SP
3000 cq = &rxo->cq;
3001 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 3002 sizeof(struct be_eth_rx_compl));
3abcdeda 3003 if (rc)
10ef9ab4 3004 return rc;
3abcdeda 3005
827da44c 3006 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
3007 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3008 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 3009 if (rc)
10ef9ab4 3010 return rc;
3abcdeda 3011 }
6b7c5b94 3012
d379142b 3013 dev_info(&adapter->pdev->dev,
71bb8bd0 3014 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 3015 return 0;
b628bde2
SP
3016}
3017
6b7c5b94
SP
3018static irqreturn_t be_intx(int irq, void *dev)
3019{
e49cc34f
SP
3020 struct be_eq_obj *eqo = dev;
3021 struct be_adapter *adapter = eqo->adapter;
3022 int num_evts = 0;
6b7c5b94 3023
d0b9cec3
SP
3024 /* IRQ is not expected when NAPI is scheduled as the EQ
3025 * will not be armed.
3026 * But, this can happen on Lancer INTx where it takes
3027 * a while to de-assert INTx, or in BE2 where occasionally
3028 * an interrupt may be raised even when EQ is unarmed.
3029 * If NAPI is already scheduled, then counting & notifying
3030 * events will orphan them.
e49cc34f 3031 */
d0b9cec3 3032 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 3033 num_evts = events_get(eqo);
d0b9cec3
SP
3034 __napi_schedule(&eqo->napi);
3035 if (num_evts)
3036 eqo->spurious_intr = 0;
3037 }
20947770 3038 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 3039
d0b9cec3
SP
3040 /* Return IRQ_HANDLED only for the the first spurious intr
3041 * after a valid intr to stop the kernel from branding
3042 * this irq as a bad one!
e49cc34f 3043 */
d0b9cec3
SP
3044 if (num_evts || eqo->spurious_intr++ == 0)
3045 return IRQ_HANDLED;
3046 else
3047 return IRQ_NONE;
6b7c5b94
SP
3048}
3049
10ef9ab4 3050static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 3051{
10ef9ab4 3052 struct be_eq_obj *eqo = dev;
6b7c5b94 3053
20947770 3054 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 3055 napi_schedule(&eqo->napi);
6b7c5b94
SP
3056 return IRQ_HANDLED;
3057}
3058
2e588f84 3059static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 3060{
e38b1706 3061 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
3062}
3063
10ef9ab4 3064static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 3065 int budget, int polling)
6b7c5b94 3066{
3abcdeda
SP
3067 struct be_adapter *adapter = rxo->adapter;
3068 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 3069 struct be_rx_compl_info *rxcp;
6b7c5b94 3070 u32 work_done;
c30d7266 3071 u32 frags_consumed = 0;
6b7c5b94
SP
3072
3073 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 3074 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
3075 if (!rxcp)
3076 break;
3077
12004ae9
SP
3078 /* Is it a flush compl that has no data */
3079 if (unlikely(rxcp->num_rcvd == 0))
3080 goto loop_continue;
3081
3082 /* Discard compl with partial DMA on Lancer B0 */
3083 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 3084 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
3085 goto loop_continue;
3086 }
3087
3088 /* On BE drop pkts that arrive due to imperfect filtering in
3089 * promiscuous mode on some skews
3090 */
3091 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 3092 !lancer_chip(adapter))) {
10ef9ab4 3093 be_rx_compl_discard(rxo, rxcp);
12004ae9 3094 goto loop_continue;
64642811 3095 }
009dd872 3096
6384a4d0
SP
3097 /* Don't do gro when we're busy_polling */
3098 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 3099 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 3100 else
6384a4d0
SP
3101 be_rx_compl_process(rxo, napi, rxcp);
3102
12004ae9 3103loop_continue:
c30d7266 3104 frags_consumed += rxcp->num_rcvd;
2e588f84 3105 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
3106 }
3107
10ef9ab4
SP
3108 if (work_done) {
3109 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 3110
6384a4d0
SP
3111 /* When an rx-obj gets into post_starved state, just
3112 * let be_worker do the posting.
3113 */
3114 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3115 !rxo->rx_post_starved)
c30d7266
AK
3116 be_post_rx_frags(rxo, GFP_ATOMIC,
3117 max_t(u32, MAX_RX_POST,
3118 frags_consumed));
6b7c5b94 3119 }
10ef9ab4 3120
6b7c5b94
SP
3121 return work_done;
3122}
3123
152ffe5b 3124static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
3125{
3126 switch (status) {
3127 case BE_TX_COMP_HDR_PARSE_ERR:
3128 tx_stats(txo)->tx_hdr_parse_err++;
3129 break;
3130 case BE_TX_COMP_NDMA_ERR:
3131 tx_stats(txo)->tx_dma_err++;
3132 break;
3133 case BE_TX_COMP_ACL_ERR:
3134 tx_stats(txo)->tx_spoof_check_err++;
3135 break;
3136 }
3137}
3138
152ffe5b 3139static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
3140{
3141 switch (status) {
3142 case LANCER_TX_COMP_LSO_ERR:
3143 tx_stats(txo)->tx_tso_err++;
3144 break;
3145 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3146 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3147 tx_stats(txo)->tx_spoof_check_err++;
3148 break;
3149 case LANCER_TX_COMP_QINQ_ERR:
3150 tx_stats(txo)->tx_qinq_err++;
3151 break;
3152 case LANCER_TX_COMP_PARITY_ERR:
3153 tx_stats(txo)->tx_internal_parity_err++;
3154 break;
3155 case LANCER_TX_COMP_DMA_ERR:
3156 tx_stats(txo)->tx_dma_err++;
3157 break;
3158 }
3159}
3160
c8f64615
SP
3161static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3162 int idx)
6b7c5b94 3163{
c8f64615 3164 int num_wrbs = 0, work_done = 0;
152ffe5b 3165 struct be_tx_compl_info *txcp;
c8f64615 3166
152ffe5b
SB
3167 while ((txcp = be_tx_compl_get(txo))) {
3168 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 3169 work_done++;
3c8def97 3170
152ffe5b 3171 if (txcp->status) {
512bb8a2 3172 if (lancer_chip(adapter))
152ffe5b 3173 lancer_update_tx_err(txo, txcp->status);
512bb8a2 3174 else
152ffe5b 3175 be_update_tx_err(txo, txcp->status);
512bb8a2 3176 }
10ef9ab4 3177 }
6b7c5b94 3178
10ef9ab4
SP
3179 if (work_done) {
3180 be_cq_notify(adapter, txo->cq.id, true, work_done);
3181 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 3182
10ef9ab4
SP
3183 /* As Tx wrbs have been freed up, wake up netdev queue
3184 * if it was stopped due to lack of tx wrbs. */
3185 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 3186 be_can_txq_wake(txo)) {
10ef9ab4 3187 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 3188 }
10ef9ab4
SP
3189
3190 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3191 tx_stats(txo)->tx_compl += work_done;
3192 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 3193 }
10ef9ab4 3194}
6b7c5b94 3195
f7062ee5
SP
3196#ifdef CONFIG_NET_RX_BUSY_POLL
3197static inline bool be_lock_napi(struct be_eq_obj *eqo)
3198{
3199 bool status = true;
3200
3201 spin_lock(&eqo->lock); /* BH is already disabled */
3202 if (eqo->state & BE_EQ_LOCKED) {
3203 WARN_ON(eqo->state & BE_EQ_NAPI);
3204 eqo->state |= BE_EQ_NAPI_YIELD;
3205 status = false;
3206 } else {
3207 eqo->state = BE_EQ_NAPI;
3208 }
3209 spin_unlock(&eqo->lock);
3210 return status;
3211}
3212
3213static inline void be_unlock_napi(struct be_eq_obj *eqo)
3214{
3215 spin_lock(&eqo->lock); /* BH is already disabled */
3216
3217 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3218 eqo->state = BE_EQ_IDLE;
3219
3220 spin_unlock(&eqo->lock);
3221}
3222
3223static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3224{
3225 bool status = true;
3226
3227 spin_lock_bh(&eqo->lock);
3228 if (eqo->state & BE_EQ_LOCKED) {
3229 eqo->state |= BE_EQ_POLL_YIELD;
3230 status = false;
3231 } else {
3232 eqo->state |= BE_EQ_POLL;
3233 }
3234 spin_unlock_bh(&eqo->lock);
3235 return status;
3236}
3237
3238static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3239{
3240 spin_lock_bh(&eqo->lock);
3241
3242 WARN_ON(eqo->state & (BE_EQ_NAPI));
3243 eqo->state = BE_EQ_IDLE;
3244
3245 spin_unlock_bh(&eqo->lock);
3246}
3247
3248static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3249{
3250 spin_lock_init(&eqo->lock);
3251 eqo->state = BE_EQ_IDLE;
3252}
3253
3254static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3255{
3256 local_bh_disable();
3257
3258 /* It's enough to just acquire napi lock on the eqo to stop
3259 * be_busy_poll() from processing any queues.
3260 */
3261 while (!be_lock_napi(eqo))
3262 mdelay(1);
3263
3264 local_bh_enable();
3265}
3266
3267#else /* CONFIG_NET_RX_BUSY_POLL */
3268
3269static inline bool be_lock_napi(struct be_eq_obj *eqo)
3270{
3271 return true;
3272}
3273
3274static inline void be_unlock_napi(struct be_eq_obj *eqo)
3275{
3276}
3277
3278static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3279{
3280 return false;
3281}
3282
3283static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3284{
3285}
3286
3287static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3288{
3289}
3290
3291static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3292{
3293}
3294#endif /* CONFIG_NET_RX_BUSY_POLL */
3295
68d7bdcb 3296int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3297{
3298 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3299 struct be_adapter *adapter = eqo->adapter;
0b545a62 3300 int max_work = 0, work, i, num_evts;
6384a4d0 3301 struct be_rx_obj *rxo;
a4906ea0 3302 struct be_tx_obj *txo;
20947770 3303 u32 mult_enc = 0;
f31e50a8 3304
0b545a62
SP
3305 num_evts = events_get(eqo);
3306
a4906ea0
SP
3307 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3308 be_process_tx(adapter, txo, i);
f31e50a8 3309
6384a4d0
SP
3310 if (be_lock_napi(eqo)) {
3311 /* This loop will iterate twice for EQ0 in which
3312 * completions of the last RXQ (default one) are also processed.
3313 * For other EQs the loop iterates only once
3314 */
3315 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3316 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3317 max_work = max(work, max_work);
3318 }
3319 be_unlock_napi(eqo);
3320 } else {
3321 max_work = budget;
10ef9ab4 3322 }
6b7c5b94 3323
10ef9ab4
SP
3324 if (is_mcc_eqo(eqo))
3325 be_process_mcc(adapter);
93c86700 3326
10ef9ab4
SP
3327 if (max_work < budget) {
3328 napi_complete(napi);
20947770
PR
3329
3330 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3331 * delay via a delay multiplier encoding value
3332 */
3333 if (skyhawk_chip(adapter))
3334 mult_enc = be_get_eq_delay_mult_enc(eqo);
3335
3336 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3337 mult_enc);
10ef9ab4
SP
3338 } else {
3339 /* As we'll continue in polling mode, count and clear events */
20947770 3340 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3341 }
10ef9ab4 3342 return max_work;
6b7c5b94
SP
3343}
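/* Illustrative budget trace (not upstream text): with budget = 64, if
 * every RXQ on this EQ completes fewer than 64 frames, max_work < budget,
 * so napi_complete() runs and the EQ is re-armed (with a Skyhawk delay
 * multiplier hint when available). If any RXQ consumes the full budget,
 * max_work == budget, the EQ stays un-armed and the NAPI core calls
 * be_poll() again.
 */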
3344
6384a4d0
SP
3345#ifdef CONFIG_NET_RX_BUSY_POLL
3346static int be_busy_poll(struct napi_struct *napi)
3347{
3348 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3349 struct be_adapter *adapter = eqo->adapter;
3350 struct be_rx_obj *rxo;
3351 int i, work = 0;
3352
3353 if (!be_lock_busy_poll(eqo))
3354 return LL_FLUSH_BUSY;
3355
3356 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3357 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3358 if (work)
3359 break;
3360 }
3361
3362 be_unlock_busy_poll(eqo);
3363 return work;
3364}
3365#endif
3366
f67ef7ba 3367void be_detect_error(struct be_adapter *adapter)
7c185276 3368{
e1cfb67a
PR
3369 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3370 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3371 u32 i;
eb0eecc1 3372 struct device *dev = &adapter->pdev->dev;
7c185276 3373
954f6825 3374 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3375 return;
3376
e1cfb67a
PR
3377 if (lancer_chip(adapter)) {
3378 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3379 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3380 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3381 sliport_err1 = ioread32(adapter->db +
748b539a 3382 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3383 sliport_err2 = ioread32(adapter->db +
748b539a 3384 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3385 /* Do not log error messages if it's a FW reset */
3386 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3387 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3388 dev_info(dev, "Firmware update in progress\n");
3389 } else {
eb0eecc1
SK
3390 dev_err(dev, "Error detected in the card\n");
3391 dev_err(dev, "ERR: sliport status 0x%x\n",
3392 sliport_status);
3393 dev_err(dev, "ERR: sliport error1 0x%x\n",
3394 sliport_err1);
3395 dev_err(dev, "ERR: sliport error2 0x%x\n",
3396 sliport_err2);
3397 }
e1cfb67a
PR
3398 }
3399 } else {
25848c90
SR
3400 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3401 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3402 ue_lo_mask = ioread32(adapter->pcicfg +
3403 PCICFG_UE_STATUS_LOW_MASK);
3404 ue_hi_mask = ioread32(adapter->pcicfg +
3405 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3406
f67ef7ba
PR
3407 ue_lo = (ue_lo & ~ue_lo_mask);
3408 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3409
eb0eecc1
SK
3410 /* On certain platforms BE hardware can indicate spurious UEs.
3411 * Allow HW to stop working completely in case of a real UE.
3412 * Hence not setting the hw_error for UE detection.
3413 */
f67ef7ba 3414
eb0eecc1 3415 if (ue_lo || ue_hi) {
710f3e59 3416 dev_err(dev, "Error detected in the adapter\n");
eb0eecc1 3417 if (skyhawk_chip(adapter))
954f6825
VD
3418 be_set_error(adapter, BE_ERROR_UE);
3419
eb0eecc1
SK
3420 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3421 if (ue_lo & 1)
3422 dev_err(dev, "UE: %s bit set\n",
3423 ue_status_low_desc[i]);
3424 }
3425 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3426 if (ue_hi & 1)
3427 dev_err(dev, "UE: %s bit set\n",
3428 ue_status_hi_desc[i]);
3429 }
7c185276
AK
3430 }
3431 }
7c185276
AK
3432}
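/* Worked example (values invented): if the unmasked low UE status is
 * ue_lo = 0x5 after the "ue_lo & ~ue_lo_mask" step, bits 0 and 2 are set,
 * so the decode loop above logs the entries at indexes 0 and 2 of
 * ue_status_low_desc[]. Bits set in PCICFG_UE_STATUS_LOW_MASK are cleared
 * before the loop and are never reported.
 */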
3433
8d56ff11
SP
3434static void be_msix_disable(struct be_adapter *adapter)
3435{
ac6a0c4a 3436 if (msix_enabled(adapter)) {
8d56ff11 3437 pci_disable_msix(adapter->pdev);
ac6a0c4a 3438 adapter->num_msix_vec = 0;
68d7bdcb 3439 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3440 }
3441}
3442
c2bba3df 3443static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3444{
6fde0e63 3445 unsigned int i, max_roce_eqs;
d379142b 3446 struct device *dev = &adapter->pdev->dev;
6fde0e63 3447 int num_vec;
6b7c5b94 3448
ce7faf0a
SP
3449 /* If RoCE is supported, program the max number of vectors that
3450 * could be used for NIC and RoCE, else, just program the number
3451 * we'll use initially.
92bf14ab 3452 */
e261768e
SP
3453 if (be_roce_supported(adapter)) {
3454 max_roce_eqs =
3455 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3456 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3457 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3458 } else {
3459 num_vec = max(adapter->cfg_num_rx_irqs,
3460 adapter->cfg_num_tx_irqs);
3461 }
3abcdeda 3462
ac6a0c4a 3463 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3464 adapter->msix_entries[i].entry = i;
3465
7dc4c064
AG
3466 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3467 MIN_MSIX_VECTORS, num_vec);
3468 if (num_vec < 0)
3469 goto fail;
92bf14ab 3470
92bf14ab
SP
3471 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3472 adapter->num_msix_roce_vec = num_vec / 2;
3473 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3474 adapter->num_msix_roce_vec);
3475 }
3476
3477 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3478
3479 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3480 adapter->num_msix_vec);
c2bba3df 3481 return 0;
7dc4c064
AG
3482
3483fail:
3484 dev_warn(dev, "MSIx enable failed\n");
3485
3486 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3487 if (be_virtfn(adapter))
7dc4c064
AG
3488 return num_vec;
3489 return 0;
6b7c5b94
SP
3490}
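/* Illustrative sizing (values assumed): on a RoCE-capable function with
 * be_max_func_eqs() = 32, be_max_nic_eqs() = 16 and 8 online CPUs:
 *
 *	max_roce_eqs = min(32 - 16, 8) = 8
 *	num_vec      = be_max_any_irqs() + 8
 *
 * pci_enable_msix_range() may grant anything between MIN_MSIX_VECTORS and
 * num_vec; when a RoCE function gets more than MIN_MSIX_VECTORS, half of
 * the granted vectors are reserved for RoCE.
 */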
3491
fe6d2a38 3492static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3493 struct be_eq_obj *eqo)
b628bde2 3494{
f2f781a7 3495 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3496}
6b7c5b94 3497
b628bde2
SP
3498static int be_msix_register(struct be_adapter *adapter)
3499{
10ef9ab4
SP
3500 struct net_device *netdev = adapter->netdev;
3501 struct be_eq_obj *eqo;
3502 int status, i, vec;
6b7c5b94 3503
10ef9ab4
SP
3504 for_all_evt_queues(adapter, eqo, i) {
3505 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3506 vec = be_msix_vec_get(adapter, eqo);
3507 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3508 if (status)
3509 goto err_msix;
d658d98a
PR
3510
3511 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3512 }
b628bde2 3513
6b7c5b94 3514 return 0;
3abcdeda 3515err_msix:
6e3cd5fa
VD
3516 for (i--; i >= 0; i--) {
3517 eqo = &adapter->eq_obj[i];
10ef9ab4 3518 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3519 }
10ef9ab4 3520 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3521 status);
ac6a0c4a 3522 be_msix_disable(adapter);
6b7c5b94
SP
3523 return status;
3524}
3525
3526static int be_irq_register(struct be_adapter *adapter)
3527{
3528 struct net_device *netdev = adapter->netdev;
3529 int status;
3530
ac6a0c4a 3531 if (msix_enabled(adapter)) {
6b7c5b94
SP
3532 status = be_msix_register(adapter);
3533 if (status == 0)
3534 goto done;
ba343c77 3535 /* INTx is not supported for VF */
18c57c74 3536 if (be_virtfn(adapter))
ba343c77 3537 return status;
6b7c5b94
SP
3538 }
3539
e49cc34f 3540 /* INTx: only the first EQ is used */
6b7c5b94
SP
3541 netdev->irq = adapter->pdev->irq;
3542 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3543 &adapter->eq_obj[0]);
6b7c5b94
SP
3544 if (status) {
3545 dev_err(&adapter->pdev->dev,
3546 "INTx request IRQ failed - err %d\n", status);
3547 return status;
3548 }
3549done:
3550 adapter->isr_registered = true;
3551 return 0;
3552}
3553
3554static void be_irq_unregister(struct be_adapter *adapter)
3555{
3556 struct net_device *netdev = adapter->netdev;
10ef9ab4 3557 struct be_eq_obj *eqo;
d658d98a 3558 int i, vec;
6b7c5b94
SP
3559
3560 if (!adapter->isr_registered)
3561 return;
3562
3563 /* INTx */
ac6a0c4a 3564 if (!msix_enabled(adapter)) {
e49cc34f 3565 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3566 goto done;
3567 }
3568
3569 /* MSIx */
d658d98a
PR
3570 for_all_evt_queues(adapter, eqo, i) {
3571 vec = be_msix_vec_get(adapter, eqo);
3572 irq_set_affinity_hint(vec, NULL);
3573 free_irq(vec, eqo);
3574 }
3abcdeda 3575
6b7c5b94
SP
3576done:
3577 adapter->isr_registered = false;
6b7c5b94
SP
3578}
3579
10ef9ab4 3580static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3581{
62219066 3582 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3583 struct be_queue_info *q;
3584 struct be_rx_obj *rxo;
3585 int i;
3586
3587 for_all_rx_queues(adapter, rxo, i) {
3588 q = &rxo->q;
3589 if (q->created) {
99b44304
KA
3590 /* If RXQs are destroyed while in an "out of buffer"
3591 * state, there is a possibility of an HW stall on
3592 * Lancer. So, post 64 buffers to each queue to relieve
3593 * the "out of buffer" condition.
3594 * Make sure there's space in the RXQ before posting.
3595 */
3596 if (lancer_chip(adapter)) {
3597 be_rx_cq_clean(rxo);
3598 if (atomic_read(&q->used) == 0)
3599 be_post_rx_frags(rxo, GFP_KERNEL,
3600 MAX_RX_POST);
3601 }
3602
482c9e79 3603 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3604 be_rx_cq_clean(rxo);
99b44304 3605 be_rxq_clean(rxo);
482c9e79 3606 }
10ef9ab4 3607 be_queue_free(adapter, q);
482c9e79 3608 }
62219066
AK
3609
3610 if (rss->rss_flags) {
3611 rss->rss_flags = RSS_ENABLE_NONE;
3612 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3613 128, rss->rss_hkey);
3614 }
482c9e79
SP
3615}
3616
bcc84140
KA
3617static void be_disable_if_filters(struct be_adapter *adapter)
3618{
6d928ae5
IV
3619 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3620 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3621 check_privilege(adapter, BE_PRIV_FILTMGMT))
3622 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3623
bcc84140 3624 be_clear_uc_list(adapter);
92fbb1df 3625 be_clear_mc_list(adapter);
bcc84140
KA
3626
3627 /* The IFACE flags are enabled in the open path and cleared
3628 * in the close path. When a VF gets detached from the host and
3629 * assigned to a VM the following happens:
3630 * - VF's IFACE flags get cleared in the detach path
3631 * - IFACE create is issued by the VF in the attach path
3632 * Due to a bug in the BE3/Skyhawk-R FW
3633 * (Lancer FW doesn't have the bug), the IFACE capability flags
3634 * specified along with the IFACE create cmd issued by a VF are not
3635 * honoured by FW. As a consequence, if a *new* driver
3636 * (that enables/disables IFACE flags in open/close)
3637 * is loaded in the host and an *old* driver is * used by a VM/VF,
3638 * the IFACE gets created *without* the needed flags.
3639 * To avoid this, disable RX-filter flags only for Lancer.
3640 */
3641 if (lancer_chip(adapter)) {
3642 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3643 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3644 }
3645}
3646
889cd4b2
SP
3647static int be_close(struct net_device *netdev)
3648{
3649 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3650 struct be_eq_obj *eqo;
3651 int i;
889cd4b2 3652
e1ad8e33
KA
3653 /* This protection is needed as be_close() may be called even when the
3654 * adapter is in cleared state (after eeh perm failure)
3655 */
3656 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3657 return 0;
3658
b7172414
SP
3659 /* Before attempting cleanup ensure all the pending cmds in the
3660 * config_wq have finished execution
3661 */
3662 flush_workqueue(be_wq);
3663
bcc84140
KA
3664 be_disable_if_filters(adapter);
3665
dff345c5
IV
3666 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3667 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3668 napi_disable(&eqo->napi);
6384a4d0
SP
3669 be_disable_busy_poll(eqo);
3670 }
71237b6f 3671 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3672 }
a323d9bf
SP
3673
3674 be_async_mcc_disable(adapter);
3675
3676 /* Wait for all pending tx completions to arrive so that
3677 * all tx skbs are freed.
3678 */
fba87559 3679 netif_tx_disable(netdev);
6e1f9975 3680 be_tx_compl_clean(adapter);
a323d9bf
SP
3681
3682 be_rx_qs_destroy(adapter);
d11a347d 3683
a323d9bf 3684 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3685 if (msix_enabled(adapter))
3686 synchronize_irq(be_msix_vec_get(adapter, eqo));
3687 else
3688 synchronize_irq(netdev->irq);
3689 be_eq_clean(eqo);
63fcb27f
PR
3690 }
3691
889cd4b2
SP
3692 be_irq_unregister(adapter);
3693
482c9e79
SP
3694 return 0;
3695}
3696
10ef9ab4 3697static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3698{
1dcf7b1c
ED
3699 struct rss_info *rss = &adapter->rss_info;
3700 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3701 struct be_rx_obj *rxo;
e9008ee9 3702 int rc, i, j;
482c9e79
SP
3703
3704 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3705 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3706 sizeof(struct be_eth_rx_d));
3707 if (rc)
3708 return rc;
3709 }
3710
71bb8bd0
VV
3711 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3712 rxo = default_rxo(adapter);
3713 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3714 rx_frag_size, adapter->if_handle,
3715 false, &rxo->rss_id);
3716 if (rc)
3717 return rc;
3718 }
10ef9ab4
SP
3719
3720 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3721 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3722 rx_frag_size, adapter->if_handle,
3723 true, &rxo->rss_id);
482c9e79
SP
3724 if (rc)
3725 return rc;
3726 }
3727
3728 if (be_multi_rxq(adapter)) {
71bb8bd0 3729 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3730 for_all_rss_queues(adapter, rxo, i) {
e2557877 3731 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3732 break;
e2557877
VD
3733 rss->rsstable[j + i] = rxo->rss_id;
3734 rss->rss_queue[j + i] = i;
e9008ee9
PR
3735 }
3736 }
e2557877
VD
3737 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3738 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3739
3740 if (!BEx_chip(adapter))
e2557877
VD
3741 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3742 RSS_ENABLE_UDP_IPV6;
62219066
AK
3743
3744 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3745 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3746 RSS_INDIR_TABLE_LEN, rss_key);
3747 if (rc) {
3748 rss->rss_flags = RSS_ENABLE_NONE;
3749 return rc;
3750 }
3751
3752 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3753 } else {
3754 /* Disable RSS, if only default RX Q is created */
e2557877 3755 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3756 }
594ad54a 3757
e2557877 3758
b02e60c8
SR
3759 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3760 * which is a queue empty condition
3761 */
10ef9ab4 3762 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3763 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3764
889cd4b2
SP
3765 return 0;
3766}
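/* Worked example (illustrative): with 4 RSS queues and an indirection
 * table of RSS_INDIR_TABLE_LEN entries (assumed 128, matching the length
 * used in be_rx_qs_destroy()), the nested loops above produce
 *
 *	rsstable[] = { rss_id(q0), rss_id(q1), rss_id(q2), rss_id(q3),
 *		       rss_id(q0), rss_id(q1), ... }   (pattern repeats)
 *
 * so hashed flows spread round-robin across the RSS queues. Posting
 * RX_Q_LEN - 1 frags rather than RX_Q_LEN keeps one slot unused, so
 * head == tail can only mean "ring empty", never "ring full".
 */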
3767
bcc84140
KA
3768static int be_enable_if_filters(struct be_adapter *adapter)
3769{
3770 int status;
3771
c1bb0a55 3772 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
bcc84140
KA
3773 if (status)
3774 return status;
3775
34393529
IV
3776 /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
3777 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3778 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
988d44b1 3779 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
bcc84140
KA
3780 if (status)
3781 return status;
c27ebf58 3782 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
bcc84140
KA
3783 }
3784
3785 if (adapter->vlans_added)
3786 be_vid_config(adapter);
3787
b7172414 3788 __be_set_rx_mode(adapter);
bcc84140
KA
3789
3790 return 0;
3791}
3792
6b7c5b94
SP
3793static int be_open(struct net_device *netdev)
3794{
3795 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3796 struct be_eq_obj *eqo;
3abcdeda 3797 struct be_rx_obj *rxo;
10ef9ab4 3798 struct be_tx_obj *txo;
b236916a 3799 u8 link_status;
3abcdeda 3800 int status, i;
5fb379ee 3801
10ef9ab4 3802 status = be_rx_qs_create(adapter);
482c9e79
SP
3803 if (status)
3804 goto err;
3805
bcc84140
KA
3806 status = be_enable_if_filters(adapter);
3807 if (status)
3808 goto err;
3809
c2bba3df
SK
3810 status = be_irq_register(adapter);
3811 if (status)
3812 goto err;
5fb379ee 3813
10ef9ab4 3814 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3815 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3816
10ef9ab4
SP
3817 for_all_tx_queues(adapter, txo, i)
3818 be_cq_notify(adapter, txo->cq.id, true, 0);
3819
7a1e9b20
SP
3820 be_async_mcc_enable(adapter);
3821
10ef9ab4
SP
3822 for_all_evt_queues(adapter, eqo, i) {
3823 napi_enable(&eqo->napi);
6384a4d0 3824 be_enable_busy_poll(eqo);
20947770 3825 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3826 }
04d3d624 3827 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3828
323ff71e 3829 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3830 if (!status)
3831 be_link_status_update(adapter, link_status);
3832
fba87559 3833 netif_tx_start_all_queues(netdev);
c9c47142 3834 if (skyhawk_chip(adapter))
bde6b7cd 3835 udp_tunnel_get_rx_info(netdev);
c5abe7c0 3836
889cd4b2
SP
3837 return 0;
3838err:
3839 be_close(adapter->netdev);
3840 return -EIO;
5fb379ee
SP
3841}
3842
f7062ee5
SP
3843static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3844{
3845 u32 addr;
3846
3847 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3848
3849 mac[5] = (u8)(addr & 0xFF);
3850 mac[4] = (u8)((addr >> 8) & 0xFF);
3851 mac[3] = (u8)((addr >> 16) & 0xFF);
3852 /* Use the OUI from the current MAC address */
3853 memcpy(mac, adapter->netdev->dev_addr, 3);
3854}
3855
6d87f5c3
AK
3856/*
3857 * Generate a seed MAC address from the PF MAC Address using jhash.
3858 * MAC addresses for VFs are assigned incrementally starting from the seed.
3859 * These addresses are programmed in the ASIC by the PF and the VF driver
3860 * queries for the MAC address during its probe.
3861 */
4c876616 3862static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3863{
f9449ab7 3864 u32 vf;
3abcdeda 3865 int status = 0;
6d87f5c3 3866 u8 mac[ETH_ALEN];
11ac75ed 3867 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3868
3869 be_vf_eth_addr_generate(adapter, mac);
3870
11ac75ed 3871 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3872 if (BEx_chip(adapter))
590c391d 3873 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3874 vf_cfg->if_handle,
3875 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3876 else
3877 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3878 vf + 1);
590c391d 3879
6d87f5c3
AK
3880 if (status)
3881 dev_err(&adapter->pdev->dev,
748b539a
SP
3882 "Mac address assignment failed for VF %d\n",
3883 vf);
6d87f5c3 3884 else
11ac75ed 3885 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3886
3887 mac[5] += 1;
3888 }
3889 return status;
3890}
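/* Illustrative example (addresses invented): if the PF MAC is
 * 00:00:c9:12:34:56 and jhash() of it yields addr = 0xa1b2c3d4, the seed
 * from be_vf_eth_addr_generate() is 00:00:c9:b2:c3:d4 (OUI kept, low 3
 * bytes taken from the hash). VF0 then gets the seed, VF1 gets seed + 1,
 * and so on via the mac[5] += 1 increment above.
 */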
3891
4c876616
SP
3892static int be_vfs_mac_query(struct be_adapter *adapter)
3893{
3894 int status, vf;
3895 u8 mac[ETH_ALEN];
3896 struct be_vf_cfg *vf_cfg;
4c876616
SP
3897
3898 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3899 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3900 mac, vf_cfg->if_handle,
3901 false, vf+1);
4c876616
SP
3902 if (status)
3903 return status;
3904 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3905 }
3906 return 0;
3907}
3908
f9449ab7 3909static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3910{
11ac75ed 3911 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3912 u32 vf;
3913
257a3feb 3914 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3915 dev_warn(&adapter->pdev->dev,
3916 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3917 goto done;
3918 }
3919
b4c1df93
SP
3920 pci_disable_sriov(adapter->pdev);
3921
11ac75ed 3922 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3923 if (BEx_chip(adapter))
11ac75ed
SP
3924 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3925 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3926 else
3927 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3928 vf + 1);
f9449ab7 3929
11ac75ed
SP
3930 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3931 }
884476be
SK
3932
3933 if (BE3_chip(adapter))
3934 be_cmd_set_hsw_config(adapter, 0, 0,
3935 adapter->if_handle,
3936 PORT_FWD_TYPE_PASSTHRU, 0);
39f1d94d
SP
3937done:
3938 kfree(adapter->vf_cfg);
3939 adapter->num_vfs = 0;
f174c7ec 3940 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3941}
3942
7707133c
SP
3943static void be_clear_queues(struct be_adapter *adapter)
3944{
3945 be_mcc_queues_destroy(adapter);
3946 be_rx_cqs_destroy(adapter);
3947 be_tx_queues_destroy(adapter);
3948 be_evt_queues_destroy(adapter);
3949}
3950
68d7bdcb 3951static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3952{
191eb756
SP
3953 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3954 cancel_delayed_work_sync(&adapter->work);
3955 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3956 }
68d7bdcb
SP
3957}
3958
eb7dd46c
SP
3959static void be_cancel_err_detection(struct be_adapter *adapter)
3960{
710f3e59
SB
3961 struct be_error_recovery *err_rec = &adapter->error_recovery;
3962
3963 if (!be_err_recovery_workq)
3964 return;
3965
eb7dd46c 3966 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
710f3e59 3967 cancel_delayed_work_sync(&err_rec->err_detection_work);
eb7dd46c
SP
3968 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3969 }
3970}
3971
c9c47142
SP
3972static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3973{
630f4b70
SB
3974 struct net_device *netdev = adapter->netdev;
3975
c9c47142
SP
3976 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3977 be_cmd_manage_iface(adapter, adapter->if_handle,
3978 OP_CONVERT_TUNNEL_TO_NORMAL);
3979
3980 if (adapter->vxlan_port)
3981 be_cmd_set_vxlan_port(adapter, 0);
3982
3983 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3984 adapter->vxlan_port = 0;
630f4b70
SB
3985
3986 netdev->hw_enc_features = 0;
3987 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3988 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142
SP
3989}
3990
b9263cbf
SR
3991static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3992 struct be_resources *vft_res)
f2858738
VV
3993{
3994 struct be_resources res = adapter->pool_res;
b9263cbf
SR
3995 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3996 struct be_resources res_mod = {0};
f2858738
VV
3997 u16 num_vf_qs = 1;
3998
de2b1e03
SK
3999 /* Distribute the queue resources among the PF and its VFs */
4000 if (num_vfs) {
4001 /* Divide the rx queues evenly among the VFs and the PF, capped
4002 * at VF-EQ-count. Any remainder queues belong to the PF.
4003 */
ee9ad280
SB
4004 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4005 res.max_rss_qs / (num_vfs + 1));
f2858738 4006
de2b1e03
SK
4007 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4008 * RSS Tables per port. Provide RSS on VFs only if the number of
4009 * VFs requested is less than its PF Pool's RSS Tables limit.
f2858738 4010 */
de2b1e03 4011 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
f2858738
VV
4012 num_vf_qs = 1;
4013 }
b9263cbf
SR
4014
4015 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4016 * which are modifiable using SET_PROFILE_CONFIG cmd.
4017 */
de2b1e03
SK
4018 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4019 RESOURCE_MODIFIABLE, 0);
b9263cbf
SR
4020
4021 /* If RSS IFACE capability flags are modifiable for a VF, set the
4022 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4023 * more than 1 RSSQ is available for a VF.
4024 * Otherwise, provision only 1 queue pair for VF.
4025 */
4026 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4027 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4028 if (num_vf_qs > 1) {
4029 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4030 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4031 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4032 } else {
4033 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4034 BE_IF_FLAGS_DEFQ_RSS);
4035 }
4036 } else {
4037 num_vf_qs = 1;
4038 }
4039
4040 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4041 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4042 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4043 }
4044
4045 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4046 vft_res->max_rx_qs = num_vf_qs;
4047 vft_res->max_rss_qs = num_vf_qs;
4048 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4049 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4050
4051 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4052 * among the PF and its VFs, if the fields are changeable
4053 */
4054 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4055 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4056
4057 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4058 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4059
4060 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4061 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4062
4063 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4064 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
f2858738
VV
4065}
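/* Illustrative arithmetic (values assumed): with num_vfs = 7 and a PF
 * pool reporting res.max_rss_qs = 32 and res.max_tx_qs = 16:
 *
 *	num_vf_qs          = min(SH_VF_MAX_NIC_EQS, 32 / (7 + 1))
 *	vft_res->max_tx_qs = 16 / (7 + 1) = 2
 *
 * Each VF (and the PF) gets an equal slice and any remainder stays with
 * the PF. SH_VF_MAX_NIC_EQS is a Skyhawk cap from be.h whose value is
 * not shown here.
 */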
4066
b7172414
SP
4067static void be_if_destroy(struct be_adapter *adapter)
4068{
4069 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4070
4071 kfree(adapter->pmac_id);
4072 adapter->pmac_id = NULL;
4073
4074 kfree(adapter->mc_list);
4075 adapter->mc_list = NULL;
4076
4077 kfree(adapter->uc_list);
4078 adapter->uc_list = NULL;
4079}
4080
b05004ad
SK
4081static int be_clear(struct be_adapter *adapter)
4082{
f2858738 4083 struct pci_dev *pdev = adapter->pdev;
b9263cbf 4084 struct be_resources vft_res = {0};
f2858738 4085
68d7bdcb 4086 be_cancel_worker(adapter);
191eb756 4087
b7172414
SP
4088 flush_workqueue(be_wq);
4089
11ac75ed 4090 if (sriov_enabled(adapter))
f9449ab7
SP
4091 be_vf_clear(adapter);
4092
bec84e6b
VV
4093 /* Re-configure FW to distribute resources evenly across max-supported
4094 * number of VFs, only when VFs are not already enabled.
4095 */
ace40aff
VV
4096 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4097 !pci_vfs_assigned(pdev)) {
b9263cbf
SR
4098 be_calculate_vf_res(adapter,
4099 pci_sriov_get_totalvfs(pdev),
4100 &vft_res);
bec84e6b 4101 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738 4102 pci_sriov_get_totalvfs(pdev),
b9263cbf 4103 &vft_res);
f2858738 4104 }
bec84e6b 4105
c9c47142 4106 be_disable_vxlan_offloads(adapter);
fbc13f01 4107
b7172414 4108 be_if_destroy(adapter);
a54769f5 4109
7707133c 4110 be_clear_queues(adapter);
a54769f5 4111
10ef9ab4 4112 be_msix_disable(adapter);
e1ad8e33 4113 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
4114 return 0;
4115}
4116
4c876616 4117static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 4118{
92bf14ab 4119 struct be_resources res = {0};
bcc84140 4120 u32 cap_flags, en_flags, vf;
4c876616 4121 struct be_vf_cfg *vf_cfg;
0700d816 4122 int status;
abb93951 4123
0700d816 4124 /* If a FW profile exists, then cap_flags are updated */
c1bb0a55 4125 cap_flags = BE_VF_IF_EN_FLAGS;
abb93951 4126
4c876616 4127 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab 4128 if (!BE3_chip(adapter)) {
de2b1e03
SK
4129 status = be_cmd_get_profile_config(adapter, &res, NULL,
4130 ACTIVE_PROFILE_TYPE,
f2858738 4131 RESOURCE_LIMITS,
92bf14ab 4132 vf + 1);
435452aa 4133 if (!status) {
92bf14ab 4134 cap_flags = res.if_cap_flags;
435452aa
VV
4135 /* Prevent VFs from enabling VLAN promiscuous
4136 * mode
4137 */
4138 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4139 }
92bf14ab 4140 }
4c876616 4141
c1bb0a55
VD
4142 /* PF should enable IF flags during proxy if_create call */
4143 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
bcc84140
KA
4144 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4145 &vf_cfg->if_handle, vf + 1);
4c876616 4146 if (status)
0700d816 4147 return status;
4c876616 4148 }
0700d816
KA
4149
4150 return 0;
abb93951
PR
4151}
4152
39f1d94d 4153static int be_vf_setup_init(struct be_adapter *adapter)
30128031 4154{
11ac75ed 4155 struct be_vf_cfg *vf_cfg;
30128031
SP
4156 int vf;
4157
39f1d94d
SP
4158 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4159 GFP_KERNEL);
4160 if (!adapter->vf_cfg)
4161 return -ENOMEM;
4162
11ac75ed
SP
4163 for_all_vfs(adapter, vf_cfg, vf) {
4164 vf_cfg->if_handle = -1;
4165 vf_cfg->pmac_id = -1;
30128031 4166 }
39f1d94d 4167 return 0;
30128031
SP
4168}
4169
f9449ab7
SP
4170static int be_vf_setup(struct be_adapter *adapter)
4171{
c502224e 4172 struct device *dev = &adapter->pdev->dev;
11ac75ed 4173 struct be_vf_cfg *vf_cfg;
4c876616 4174 int status, old_vfs, vf;
e7bcbd7b 4175 bool spoofchk;
39f1d94d 4176
257a3feb 4177 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
4178
4179 status = be_vf_setup_init(adapter);
4180 if (status)
4181 goto err;
30128031 4182
4c876616
SP
4183 if (old_vfs) {
4184 for_all_vfs(adapter, vf_cfg, vf) {
4185 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4186 if (status)
4187 goto err;
4188 }
f9449ab7 4189
4c876616
SP
4190 status = be_vfs_mac_query(adapter);
4191 if (status)
4192 goto err;
4193 } else {
bec84e6b
VV
4194 status = be_vfs_if_create(adapter);
4195 if (status)
4196 goto err;
4197
39f1d94d
SP
4198 status = be_vf_eth_addr_config(adapter);
4199 if (status)
4200 goto err;
4201 }
f9449ab7 4202
11ac75ed 4203 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 4204 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
4205 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4206 vf + 1);
4207 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 4208 status = be_cmd_set_fn_privileges(adapter,
435452aa 4209 vf_cfg->privileges |
04a06028
SP
4210 BE_PRIV_FILTMGMT,
4211 vf + 1);
435452aa
VV
4212 if (!status) {
4213 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
4214 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4215 vf);
435452aa 4216 }
04a06028
SP
4217 }
4218
0f77ba73
RN
4219 /* Allow full available bandwidth */
4220 if (!old_vfs)
4221 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 4222
e7bcbd7b
KA
4223 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4224 vf_cfg->if_handle, NULL,
4225 &spoofchk);
4226 if (!status)
4227 vf_cfg->spoofchk = spoofchk;
4228
bdce2ad7 4229 if (!old_vfs) {
0599863d 4230 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
4231 be_cmd_set_logical_link_config(adapter,
4232 IFLA_VF_LINK_STATE_AUTO,
4233 vf+1);
4234 }
f9449ab7 4235 }
b4c1df93
SP
4236
4237 if (!old_vfs) {
4238 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4239 if (status) {
4240 dev_err(dev, "SRIOV enable failed\n");
4241 adapter->num_vfs = 0;
4242 goto err;
4243 }
4244 }
f174c7ec 4245
884476be
SK
4246 if (BE3_chip(adapter)) {
4247 /* On BE3, enable VEB only when SRIOV is enabled */
4248 status = be_cmd_set_hsw_config(adapter, 0, 0,
4249 adapter->if_handle,
4250 PORT_FWD_TYPE_VEB, 0);
4251 if (status)
4252 goto err;
4253 }
4254
f174c7ec 4255 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
4256 return 0;
4257err:
4c876616
SP
4258 dev_err(dev, "VF setup failed\n");
4259 be_vf_clear(adapter);
f9449ab7
SP
4260 return status;
4261}
4262
f93f160b
VV
4263/* Converting function_mode bits on BE3 to SH mc_type enums */
4264
4265static u8 be_convert_mc_type(u32 function_mode)
4266{
66064dbc 4267 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4268 return vNIC1;
66064dbc 4269 else if (function_mode & QNQ_MODE)
f93f160b
VV
4270 return FLEX10;
4271 else if (function_mode & VNIC_MODE)
4272 return vNIC2;
4273 else if (function_mode & UMC_ENABLED)
4274 return UMC;
4275 else
4276 return MC_NONE;
4277}
4278
92bf14ab
SP
4279/* On BE2/BE3 FW does not suggest the supported limits */
4280static void BEx_get_resources(struct be_adapter *adapter,
4281 struct be_resources *res)
4282{
bec84e6b 4283 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4284
4285 if (be_physfn(adapter))
4286 res->max_uc_mac = BE_UC_PMAC_COUNT;
4287 else
4288 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4289
f93f160b
VV
4290 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4291
4292 if (be_is_mc(adapter)) {
4293 /* Assuming that there are 4 channels per port,
4294 * when multi-channel is enabled
4295 */
4296 if (be_is_qnq_mode(adapter))
4297 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4298 else
4299 /* In a non-qnq multichannel mode, the pvid
4300 * takes up one vlan entry
4301 */
4302 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4303 } else {
92bf14ab 4304 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4305 }
4306
92bf14ab
SP
4307 res->max_mcast_mac = BE_MAX_MC;
4308
a5243dab
VV
4309 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4310 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4311 * *only* if it is RSS-capable.
4312 */
4313 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4314 be_virtfn(adapter) ||
4315 (be_is_mc(adapter) &&
4316 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4317 res->max_tx_qs = 1;
a28277dc
SR
4318 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4319 struct be_resources super_nic_res = {0};
4320
4321 /* On a SuperNIC profile, the driver needs to use the
4322 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4323 */
de2b1e03
SK
4324 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4325 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4326 0);
a28277dc
SR
4327 /* Some old versions of BE3 FW don't report max_tx_qs value */
4328 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4329 } else {
92bf14ab 4330 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4331 }
92bf14ab
SP
4332
4333 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4334 !use_sriov && be_physfn(adapter))
4335 res->max_rss_qs = (adapter->be3_native) ?
4336 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4337 res->max_rx_qs = res->max_rss_qs + 1;
4338
e3dc867c 4339 if (be_physfn(adapter))
d3518e21 4340 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4341 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4342 else
4343 res->max_evt_qs = 1;
92bf14ab
SP
4344
4345 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4346 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4347 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4348 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4349}
4350
30128031
SP
4351static void be_setup_init(struct be_adapter *adapter)
4352{
4353 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4354 adapter->phy.link_speed = -1;
30128031
SP
4355 adapter->if_handle = -1;
4356 adapter->be3_native = false;
f66b7cfd 4357 adapter->if_flags = 0;
51d1f98a 4358 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4359 if (be_physfn(adapter))
4360 adapter->cmd_privileges = MAX_PRIVILEGES;
4361 else
4362 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4363}
4364
de2b1e03
SK
4365/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4366 * However, this HW limitation is not exposed to the host via any SLI cmd.
4367 * As a result, in the case of SRIOV and in particular multi-partition configs
4368 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4369 * for distribution between the VFs. This self-imposed limit will determine the
4370 * number of VFs for which RSS can be enabled.
4371 */
d766e7e6 4372static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
de2b1e03
SK
4373{
4374 struct be_port_resources port_res = {0};
4375 u8 rss_tables_on_port;
4376 u16 max_vfs = be_max_vfs(adapter);
4377
4378 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4379 RESOURCE_LIMITS, 0);
4380
4381 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4382
4383 /* Each PF Pool's RSS Tables limit =
4384 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4385 */
4386 adapter->pool_res.max_rss_tables =
4387 max_vfs * rss_tables_on_port / port_res.max_vfs;
4388}
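/* Worked example (numbers assumed, including MAX_PORT_RSS_TABLES): if
 * the port has MAX_PORT_RSS_TABLES = 15 RSS tables and 1 NIC PF, then
 * rss_tables_on_port = 14. A PF owning be_max_vfs() = 32 of the port's
 * port_res.max_vfs = 64 gets
 *
 *	max_rss_tables = 32 * 14 / 64 = 7
 *
 * so RSS is offered to its VFs only while num_vfs stays within that
 * limit (see be_calculate_vf_res()).
 */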
4389
bec84e6b
VV
4390static int be_get_sriov_config(struct be_adapter *adapter)
4391{
bec84e6b 4392 struct be_resources res = {0};
d3d18312 4393 int max_vfs, old_vfs;
bec84e6b 4394
de2b1e03
SK
4395 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4396 RESOURCE_LIMITS, 0);
d3d18312 4397
ace40aff 4398 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4399 if (BE3_chip(adapter) && !res.max_vfs) {
4400 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4401 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4402 }
4403
d3d18312 4404 adapter->pool_res = res;
bec84e6b 4405
ace40aff
VV
4406 /* If during previous unload of the driver, the VFs were not disabled,
4407 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4408 * Instead use the TotalVFs value stored in the pci-dev struct.
4409 */
bec84e6b
VV
4410 old_vfs = pci_num_vf(adapter->pdev);
4411 if (old_vfs) {
ace40aff
VV
4412 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4413 old_vfs);
4414
4415 adapter->pool_res.max_vfs =
4416 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4417 adapter->num_vfs = old_vfs;
bec84e6b
VV
4418 }
4419
de2b1e03
SK
4420 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4421 be_calculate_pf_pool_rss_tables(adapter);
4422 dev_info(&adapter->pdev->dev,
4423 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4424 be_max_pf_pool_rss_tables(adapter));
4425 }
bec84e6b
VV
4426 return 0;
4427}
4428
ace40aff
VV
4429static void be_alloc_sriov_res(struct be_adapter *adapter)
4430{
4431 int old_vfs = pci_num_vf(adapter->pdev);
b9263cbf 4432 struct be_resources vft_res = {0};
ace40aff
VV
4433 int status;
4434
4435 be_get_sriov_config(adapter);
4436
4437 if (!old_vfs)
4438 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4439
4440 /* When the HW is in SRIOV capable configuration, the PF-pool
4441 * resources are given to PF during driver load, if there are no
4442 * old VFs. This facility is not available in BE3 FW.
4443 * Also, this is done by FW in Lancer chip.
4444 */
4445 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
b9263cbf 4446 be_calculate_vf_res(adapter, 0, &vft_res);
ace40aff 4447 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
b9263cbf 4448 &vft_res);
ace40aff
VV
4449 if (status)
4450 dev_err(&adapter->pdev->dev,
4451 "Failed to optimize SRIOV resources\n");
4452 }
4453}
4454
92bf14ab 4455static int be_get_resources(struct be_adapter *adapter)
abb93951 4456{
92bf14ab
SP
4457 struct device *dev = &adapter->pdev->dev;
4458 struct be_resources res = {0};
4459 int status;
abb93951 4460
92bf14ab
SP
4461 /* For Lancer, SH etc read per-function resource limits from FW.
4462 * GET_FUNC_CONFIG returns per function guaranteed limits.
4463 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits
4464 */
ce7faf0a
SP
4465 if (BEx_chip(adapter)) {
4466 BEx_get_resources(adapter, &res);
4467 } else {
92bf14ab
SP
4468 status = be_cmd_get_func_config(adapter, &res);
4469 if (status)
4470 return status;
abb93951 4471
71bb8bd0
VV
4472 /* If a default RXQ must be created, we'll use up one RSSQ */
4473 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4474 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4475 res.max_rss_qs -= 1;
abb93951 4476 }
4c876616 4477
ce7faf0a
SP
4478 /* If RoCE is supported stash away half the EQs for RoCE */
4479 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4480 res.max_evt_qs / 2 : res.max_evt_qs;
4481 adapter->res = res;
4482
71bb8bd0
VV
4483 /* If FW supports RSS default queue, then skip creating non-RSS
4484 * queue for non-IP traffic.
4485 */
4486 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4487 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4488
acbafeb1
SP
4489 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4490 be_max_txqs(adapter), be_max_rxqs(adapter),
ce7faf0a 4491 be_max_rss(adapter), be_max_nic_eqs(adapter),
acbafeb1
SP
4492 be_max_vfs(adapter));
4493 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4494 be_max_uc(adapter), be_max_mc(adapter),
4495 be_max_vlans(adapter));
4496
e261768e
SP
4497 /* Ensure RX and TX queues are created in pairs at init time */
4498 adapter->cfg_num_rx_irqs =
4499 min_t(u16, netif_get_num_default_rss_queues(),
4500 be_max_qp_irqs(adapter));
4501 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
92bf14ab 4502 return 0;
abb93951
PR
4503}
4504
39f1d94d
SP
4505static int be_get_config(struct be_adapter *adapter)
4506{
6b085ba9 4507 int status, level;
542963b7 4508 u16 profile_id;
6b085ba9 4509
980df249
SR
4510 status = be_cmd_get_cntl_attributes(adapter);
4511 if (status)
4512 return status;
4513
e97e3cda 4514 status = be_cmd_query_fw_cfg(adapter);
abb93951 4515 if (status)
92bf14ab 4516 return status;
abb93951 4517
fd7ff6f0
VD
4518 if (!lancer_chip(adapter) && be_physfn(adapter))
4519 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4520
6b085ba9
SP
4521 if (BEx_chip(adapter)) {
4522 level = be_cmd_get_fw_log_level(adapter);
4523 adapter->msg_enable =
4524 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4525 }
4526
4527 be_cmd_get_acpi_wol_cap(adapter);
45f13df7
SB
4528 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4529 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
6b085ba9 4530
21252377
VV
4531 be_cmd_query_port_name(adapter);
4532
4533 if (be_physfn(adapter)) {
542963b7
VV
4534 status = be_cmd_get_active_profile(adapter, &profile_id);
4535 if (!status)
4536 dev_info(&adapter->pdev->dev,
4537 "Using profile 0x%x\n", profile_id);
962bcb75 4538 }
bec84e6b 4539
92bf14ab 4540 return 0;
39f1d94d
SP
4541}
4542
95046b92
SP
4543static int be_mac_setup(struct be_adapter *adapter)
4544{
4545 u8 mac[ETH_ALEN];
4546 int status;
4547
4548 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4549 status = be_cmd_get_perm_mac(adapter, mac);
4550 if (status)
4551 return status;
4552
4553 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4554 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4555 }
4556
95046b92
SP
4557 return 0;
4558}
4559
68d7bdcb
SP
4560static void be_schedule_worker(struct be_adapter *adapter)
4561{
b7172414 4562 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
68d7bdcb
SP
4563 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4564}
4565
710f3e59
SB
4566static void be_destroy_err_recovery_workq(void)
4567{
4568 if (!be_err_recovery_workq)
4569 return;
4570
4571 flush_workqueue(be_err_recovery_workq);
4572 destroy_workqueue(be_err_recovery_workq);
4573 be_err_recovery_workq = NULL;
4574}
4575
972f37b4 4576static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c 4577{
710f3e59
SB
4578 struct be_error_recovery *err_rec = &adapter->error_recovery;
4579
4580 if (!be_err_recovery_workq)
4581 return;
4582
4583 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4584 msecs_to_jiffies(delay));
eb7dd46c
SP
4585 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4586}
4587
7707133c 4588static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4589{
68d7bdcb 4590 struct net_device *netdev = adapter->netdev;
10ef9ab4 4591 int status;
ba343c77 4592
7707133c 4593 status = be_evt_queues_create(adapter);
abb93951
PR
4594 if (status)
4595 goto err;
73d540f2 4596
7707133c 4597 status = be_tx_qs_create(adapter);
c2bba3df
SK
4598 if (status)
4599 goto err;
10ef9ab4 4600
7707133c 4601 status = be_rx_cqs_create(adapter);
10ef9ab4 4602 if (status)
a54769f5 4603 goto err;
6b7c5b94 4604
7707133c 4605 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4606 if (status)
4607 goto err;
4608
68d7bdcb
SP
4609 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4610 if (status)
4611 goto err;
4612
4613 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4614 if (status)
4615 goto err;
4616
7707133c
SP
4617 return 0;
4618err:
4619 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4620 return status;
4621}
4622
62219066
AK
4623static int be_if_create(struct be_adapter *adapter)
4624{
4625 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4626 u32 cap_flags = be_if_cap_flags(adapter);
4627 int status;
4628
b7172414
SP
4629 /* alloc required memory for other filtering fields */
4630 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4631 sizeof(*adapter->pmac_id), GFP_KERNEL);
4632 if (!adapter->pmac_id)
4633 return -ENOMEM;
4634
4635 adapter->mc_list = kcalloc(be_max_mc(adapter),
4636 sizeof(*adapter->mc_list), GFP_KERNEL);
4637 if (!adapter->mc_list)
4638 return -ENOMEM;
4639
4640 adapter->uc_list = kcalloc(be_max_uc(adapter),
4641 sizeof(*adapter->uc_list), GFP_KERNEL);
4642 if (!adapter->uc_list)
4643 return -ENOMEM;
4644
e261768e 4645 if (adapter->cfg_num_rx_irqs == 1)
62219066
AK
4646 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4647
4648 en_flags &= cap_flags;
4649 /* will enable all the needed filter flags in be_open() */
4650 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4651 &adapter->if_handle, 0);
4652
b7172414
SP
4653 if (status)
4654 return status;
4655
4656 return 0;
62219066
AK
4657}
4658
68d7bdcb
SP
4659int be_update_queues(struct be_adapter *adapter)
4660{
4661 struct net_device *netdev = adapter->netdev;
4662 int status;
4663
4664 if (netif_running(netdev))
4665 be_close(netdev);
4666
4667 be_cancel_worker(adapter);
4668
4669 /* If any vectors have been shared with RoCE we cannot re-program
4670 * the MSIx table.
4671 */
4672 if (!adapter->num_msix_roce_vec)
4673 be_msix_disable(adapter);
4674
4675 be_clear_queues(adapter);
62219066
AK
4676 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4677 if (status)
4678 return status;
68d7bdcb
SP
4679
4680 if (!msix_enabled(adapter)) {
4681 status = be_msix_enable(adapter);
4682 if (status)
4683 return status;
4684 }
4685
62219066
AK
4686 status = be_if_create(adapter);
4687 if (status)
4688 return status;
4689
68d7bdcb
SP
4690 status = be_setup_queues(adapter);
4691 if (status)
4692 return status;
4693
4694 be_schedule_worker(adapter);
4695
4696 if (netif_running(netdev))
4697 status = be_open(netdev);
4698
4699 return status;
4700}
4701
f7062ee5
SP
4702static inline int fw_major_num(const char *fw_ver)
4703{
4704 int fw_major = 0, i;
4705
4706 i = sscanf(fw_ver, "%d.", &fw_major);
4707 if (i != 1)
4708 return 0;
4709
4710 return fw_major;
4711}
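/* Example (version string invented): fw_major_num("4.9.134.0") scans the
 * leading "%d." and returns 4, so the BE2 check below does not warn; a
 * "3.x" firmware would return 3 and trigger the upgrade warning.
 */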
4712
710f3e59
SB
4713/* FLR the PF when recovering from an error.
4714 * Otherwise, skip the FLR if any VFs are already enabled.
4715 */
f962f840
SP
4716static bool be_reset_required(struct be_adapter *adapter)
4717{
710f3e59
SB
4718 if (be_error_recovering(adapter))
4719 return true;
4720 else
4721 return pci_num_vf(adapter->pdev) == 0;
f962f840
SP
4722}
4723
4724/* Wait for the FW to be ready and perform the required initialization */
4725static int be_func_init(struct be_adapter *adapter)
4726{
4727 int status;
4728
4729 status = be_fw_wait_ready(adapter);
4730 if (status)
4731 return status;
4732
710f3e59
SB
4733 /* FW is now ready; clear errors to allow cmds/doorbell */
4734 be_clear_error(adapter, BE_CLEAR_ALL);
4735
f962f840
SP
4736 if (be_reset_required(adapter)) {
4737 status = be_cmd_reset_function(adapter);
4738 if (status)
4739 return status;
4740
4741 /* Wait for interrupts to quiesce after an FLR */
4742 msleep(100);
f962f840
SP
4743 }
4744
4745 /* Tell FW we're ready to fire cmds */
4746 status = be_cmd_fw_init(adapter);
4747 if (status)
4748 return status;
4749
4750 /* Allow interrupts for other ULPs running on NIC function */
4751 be_intr_set(adapter, true);
4752
4753 return 0;
4754}
4755
7707133c
SP
4756static int be_setup(struct be_adapter *adapter)
4757{
4758 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4759 int status;
4760
f962f840
SP
4761 status = be_func_init(adapter);
4762 if (status)
4763 return status;
4764
7707133c
SP
4765 be_setup_init(adapter);
4766
4767 if (!lancer_chip(adapter))
4768 be_cmd_req_native_mode(adapter);
4769
980df249
SR
4770 /* invoke this cmd first to get pf_num and vf_num which are needed
4771 * for issuing profile related cmds
4772 */
4773 if (!BEx_chip(adapter)) {
4774 status = be_cmd_get_func_config(adapter, NULL);
4775 if (status)
4776 return status;
4777 }
72ef3a88 4778
de2b1e03
SK
4779 status = be_get_config(adapter);
4780 if (status)
4781 goto err;
4782
ace40aff
VV
4783 if (!BE2_chip(adapter) && be_physfn(adapter))
4784 be_alloc_sriov_res(adapter);
4785
de2b1e03 4786 status = be_get_resources(adapter);
10ef9ab4 4787 if (status)
a54769f5 4788 goto err;
6b7c5b94 4789
7707133c 4790 status = be_msix_enable(adapter);
10ef9ab4 4791 if (status)
a54769f5 4792 goto err;
6b7c5b94 4793
bcc84140 4794 /* will enable all the needed filter flags in be_open() */
62219066 4795 status = be_if_create(adapter);
7707133c 4796 if (status)
a54769f5 4797 goto err;
6b7c5b94 4798
68d7bdcb
SP
4799 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4800 rtnl_lock();
7707133c 4801 status = be_setup_queues(adapter);
68d7bdcb 4802 rtnl_unlock();
95046b92 4803 if (status)
1578e777
PR
4804 goto err;
4805
7707133c 4806 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4807
4808 status = be_mac_setup(adapter);
10ef9ab4
SP
4809 if (status)
4810 goto err;
4811
e97e3cda 4812 be_cmd_get_fw_ver(adapter);
acbafeb1 4813 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4814
e9e2a904 4815 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4816 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4817 adapter->fw_ver);
4818 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4819 }
4820
00d594c3
KA
4821 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4822 adapter->rx_fc);
4823 if (status)
4824 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4825 &adapter->rx_fc);
590c391d 4826
00d594c3
KA
4827 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4828 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4829
bdce2ad7
SR
4830 if (be_physfn(adapter))
4831 be_cmd_set_logical_link_config(adapter,
4832 IFLA_VF_LINK_STATE_AUTO, 0);
4833
884476be
SK
4834 /* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
4835 * confusing any Linux bridge or OVS it might be connected to.
4836 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4837 * when SRIOV is not enabled.
4838 */
4839 if (BE3_chip(adapter))
4840 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4841 PORT_FWD_TYPE_PASSTHRU, 0);
4842
bec84e6b
VV
4843 if (adapter->num_vfs)
4844 be_vf_setup(adapter);
f9449ab7 4845
f25b119c
PR
4846 status = be_cmd_get_phy_info(adapter);
4847 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4848 adapter->phy.fc_autoneg = 1;
4849
710f3e59
SB
4850 if (be_physfn(adapter) && !lancer_chip(adapter))
4851 be_cmd_set_features(adapter);
4852
68d7bdcb 4853 be_schedule_worker(adapter);
e1ad8e33 4854 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4855 return 0;
a54769f5
SP
4856err:
4857 be_clear(adapter);
4858 return status;
4859}
6b7c5b94 4860
66268739
IV
4861#ifdef CONFIG_NET_POLL_CONTROLLER
4862static void be_netpoll(struct net_device *netdev)
4863{
4864 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4865 struct be_eq_obj *eqo;
66268739
IV
4866 int i;
4867
e49cc34f 4868 for_all_evt_queues(adapter, eqo, i) {
20947770 4869 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4870 napi_schedule(&eqo->napi);
4871 }
66268739
IV
4872}
4873#endif
4874
485bf569
SN
4875int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4876{
4877 const struct firmware *fw;
4878 int status;
4879
4880 if (!netif_running(adapter->netdev)) {
4881 dev_err(&adapter->pdev->dev,
4882 "Firmware load not allowed (interface is down)\n");
940a3fcd 4883 return -ENETDOWN;
485bf569
SN
4884 }
4885
4886 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4887 if (status)
4888 goto fw_exit;
4889
4890 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4891
4892 if (lancer_chip(adapter))
4893 status = lancer_fw_download(adapter, fw);
4894 else
4895 status = be_fw_download(adapter, fw);
4896
eeb65ced 4897 if (!status)
e97e3cda 4898 be_cmd_get_fw_ver(adapter);
eeb65ced 4899
84517482
AK
4900fw_exit:
4901 release_firmware(fw);
4902 return status;
4903}
4904
add511b3
RP
4905static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4906 u16 flags)
a77dcb8c
AK
4907{
4908 struct be_adapter *adapter = netdev_priv(dev);
4909 struct nlattr *attr, *br_spec;
4910 int rem;
4911 int status = 0;
4912 u16 mode = 0;
4913
4914 if (!sriov_enabled(adapter))
4915 return -EOPNOTSUPP;
4916
4917 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4918 if (!br_spec)
4919 return -EINVAL;
a77dcb8c
AK
4920
4921 nla_for_each_nested(attr, br_spec, rem) {
4922 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4923 continue;
4924
b7c1a314
TG
4925 if (nla_len(attr) < sizeof(mode))
4926 return -EINVAL;
4927
a77dcb8c 4928 mode = nla_get_u16(attr);
ac0f5fba
SR
4929 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4930 return -EOPNOTSUPP;
4931
a77dcb8c
AK
4932 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4933 return -EINVAL;
4934
4935 status = be_cmd_set_hsw_config(adapter, 0, 0,
4936 adapter->if_handle,
4937 mode == BRIDGE_MODE_VEPA ?
4938 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4939 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4940 if (status)
4941 goto err;
4942
4943 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4944 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4945
4946 return status;
4947 }
4948err:
4949 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4950 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4951
4952 return status;
4953}
4954
4955static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4956 struct net_device *dev, u32 filter_mask,
4957 int nlflags)
a77dcb8c
AK
4958{
4959 struct be_adapter *adapter = netdev_priv(dev);
4960 int status = 0;
4961 u8 hsw_mode;
4962
a77dcb8c
AK
4963 /* BE and Lancer chips support VEB mode only */
4964 if (BEx_chip(adapter) || lancer_chip(adapter)) {
8431706b
IV
4965 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4966 if (!pci_sriov_get_totalvfs(adapter->pdev))
4967 return 0;
a77dcb8c
AK
4968 hsw_mode = PORT_FWD_TYPE_VEB;
4969 } else {
4970 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4971 adapter->if_handle, &hsw_mode,
4972 NULL);
a77dcb8c
AK
4973 if (status)
4974 return 0;
ff9ed19d
KP
4975
4976 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4977 return 0;
a77dcb8c
AK
4978 }
4979
4980 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4981 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4982 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4983 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4984}
4985
b7172414
SP
4986static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4987 void (*func)(struct work_struct *))
4988{
4989 struct be_cmd_work *work;
4990
4991 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4992 if (!work) {
4993 dev_err(&adapter->pdev->dev,
4994 "be_work memory allocation failed\n");
4995 return NULL;
4996 }
4997
4998 INIT_WORK(&work->work, func);
4999 work->adapter = adapter;
5000 return work;
5001}
5002
630f4b70
SB
5003/* VxLAN offload Notes:
5004 *
5005 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5006 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5007 * is expected to work across all types of IP tunnels once exported. Skyhawk
5008 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
5009 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5010 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5011 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
5012 *
5013 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5014 * adds more than one port, disable offloads and don't re-enable them again
5015 * until after all the tunnels are removed.
5016 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}

static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}

static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

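/* Note: udp_tunnel_info::port is already big-endian (__be16), so it is
 * stored unconverted in adapter->vxlan_port, compared directly against
 * udp_hdr(skb)->dest in be_features_check() below, and only converted
 * with be16_to_cpu() for logging.
 */
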
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * to allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

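/* Worked example for the length check above (illustrative): in a VxLAN
 * frame the inner MAC header starts immediately after the outer UDP and
 * VxLAN headers, so
 *
 *	skb_inner_mac_header(skb) - skb_transport_header(skb)
 *		== sizeof(struct udphdr) + sizeof(struct vxlanhdr)
 *		== 8 + 8 == 16 bytes
 *
 * Any other framing fails the test and the packet is sent without
 * checksum/GSO offloads.
 */
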
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

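/* Resulting layout (derived from the code above): ppid->id[0] carries the
 * 1-based HBA port number and the remaining id_len - 1 bytes carry the
 * controller serial-number words in reverse order, so every function on
 * one physical port reports the same id. Userspace reads it through
 * RTM_GETLINK (IFLA_PHYS_PORT_ID), e.g. "ip -d link show" (assumed
 * iproute2 behavior).
 */
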
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}

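/* Note (generic netdev convention, not specific to this driver):
 * netdev->hw_features is the set a user may toggle with "ethtool -K",
 * while netdev->features is the set currently enabled. The VLAN CTAG
 * RX/FILTER flags above are added only to ->features, so they stay
 * always-on.
 */
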
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria are met.
		 * If they are, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

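/* Summary of the recovery state machine driven by be_tpe_recover() above
 * (derived from the switch cases; delays come from error_recovery fields):
 *
 *	ST_NONE     -> ST_DETECT   : resched after UE_DETECT_DURATION
 *	ST_DETECT   -> ST_RESET    : PF0 only, after ue_to_reset_time
 *	ST_DETECT   -> ST_PRE_POLL : other PFs, after ue_to_poll_time
 *	ST_RESET    -> ST_PRE_POLL : after issuing the chip soft reset
 *	ST_PRE_POLL -> ST_REINIT   : returns 0; be_err_recover() proceeds
 *
 * Any other state, or an unrecoverable POST stage, ends recovery with a
 * negative status and a zero resched_delay.
 */
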
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

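/* Note on the mailbox allocation above: the MCC mailbox must be 16-byte
 * aligned, so sizeof(struct be_mcc_mailbox) + 16 bytes are allocated and
 * both the CPU and DMA addresses are rounded up with PTR_ALIGN(..., 16).
 * mbox_mem_alloced keeps the original, unaligned pointers so that
 * be_drv_cleanup() can hand them back to dma_free_coherent().
 */
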
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover, so wait for the dump to finish.
	 * Wait only for the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

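/* EEH flow (PowerPC PCI error recovery), as wired up in be_eeh_handlers
 * below: ->error_detected() quiesces the device and requests a slot reset,
 * ->slot_reset() re-enables the device and waits for firmware readiness,
 * and ->resume() re-creates the queues and restarts error detection.
 */
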
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to a greater number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in the Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

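/* Not part of the driver: this callback is reached through the standard
 * SR-IOV sysfs knob, e.g. (illustrative device path):
 *
 *	echo 4 > /sys/class/net/eth0/device/sriov_numvfs	# enable 4 VFs
 *	echo 0 > /sys/class/net/eth0/device/sriov_numvfs	# disable VFs
 */
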
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME ": workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME ": Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);