/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use the sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
#ifdef CONFIG_BE2NET_BE2
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
#endif /* CONFIG_BE2NET_BE2 */
#ifdef CONFIG_BE2NET_BE3
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
#endif /* CONFIG_BE2NET_BE3 */
#ifdef CONFIG_BE2NET_LANCER
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
#endif /* CONFIG_BE2NET_LANCER */
#ifdef CONFIG_BE2NET_SKYHAWK
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
#endif /* CONFIG_BE2NET_SKYHAWK */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

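/* Toggle host interrupts by flipping the HOSTINTR bit in the membar
 * control register through PCI config space. This path is used as a
 * fallback only when the FW cmd in be_intr_set() below fails.
 */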
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

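/* The be_*_notify() helpers below ring the doorbell registers mapped at
 * adapter->db: each 32-bit write encodes the ring id plus the number of
 * entries posted (or events/completions popped). The wmb() orders the ring
 * updates ahead of the doorbell write, and no doorbell is rung once a HW
 * error has been detected.
 */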
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

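/* Note on the two helpers above: pmac_id[0] tracks the MAC programmed via
 * ndo_set_mac_address while pmac_id[i + 1] tracks the i-th uc-list entry,
 * so a MAC shared with the uc-list is neither added twice nor deleted
 * while still in use.
 */
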
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC differs from
	 * the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their
	 * MAC address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember the currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

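/* The GET_STATS cmd response layout differs per ASIC generation: v0 on
 * BE2, v1 on BE3 and v2 otherwise. The helpers below return the matching
 * view of the shared stats_cmd DMA buffer.
 */
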
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	WRITE_ONCE(*acc, newacc);
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* The erx HW counter below can wrap around after 65535;
		 * the driver accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

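/* Worked example for accumulate_16bit_val(): with *acc == 0x0001fff0 and
 * a fresh HW reading of 0x0005, val is less than the low word (0xfff0),
 * so the 16-bit counter must have wrapped once:
 * newacc = 0x00010000 + 0x5 + 65536 = 0x00020005.
 */
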
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

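/* The per-queue packet/byte counters above are sampled inside
 * u64_stats_fetch_begin_irq()/retry loops so that the 64-bit reads are not
 * torn on 32-bit hosts; the error totals come from drv_stats, which is
 * refreshed from the FW stats cmd elsewhere in the driver.
 */
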
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

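/* A single packet may need up to BE_MAX_TX_FRAG_COUNT WRBs, so the TXQ is
 * treated as full once fewer than that many entries remain free; it is
 * only woken again after draining to half capacity, which avoids rapid
 * stop/wake cycling of the subqueue.
 */
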
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	u32 head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

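/* Note the mapping asymmetry in be_xmit_enqueue() above: the skb's linear
 * head is mapped with dma_map_single() while page frags use
 * skb_frag_dma_map(); unmap_tx_frag() replays the same choice through its
 * unmap_single flag, which be_xmit_restore() clears after the first
 * (head) fragment.
 */
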
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

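/* Taken together, the helpers above flag pkts (on BE3 only) whose first
 * ipv6 extension header carries hdrlen 0xff; requesting HW VLAN tagging
 * for such pkts can stall the ASIC, so the xmit workarounds below either
 * drop them or insert the VLAN tag in software instead.
 */
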
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

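/* The dummy-WRB padding above keeps the number of WRBs rung per doorbell
 * even on BE chips (Lancer is exempt), presumably because the HW fetches
 * WRBs in aligned pairs; the header WRB's num_wrb field is bumped so the
 * pad is accounted for.
 */
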
/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) && \
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

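/* When be_send_pkt_to_bmc() returns true, be_xmit() below posts the same
 * skb a second time with the mgmt bit set so the NIC hands a copy to the
 * BMC; the extra skb_get() reference keeps the skb alive until both
 * transmissions have completed.
 */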
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static void be_tx_timeout(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct sk_buff *skb;
	struct tcphdr *tcphdr;
	struct udphdr *udphdr;
	u32 *entry;
	int status;
	int i, j;

	for_all_tx_queues(adapter, txo, i) {
		dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
			 i, txo->q.head, txo->q.tail,
			 atomic_read(&txo->q.used), txo->q.id);

		entry = txo->q.dma_mem.va;
		for (j = 0; j < TX_Q_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		entry = txo->cq.dma_mem.va;
		dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
			 i, txo->cq.head, txo->cq.tail,
			 atomic_read(&txo->cq.used));
		for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		for (j = 0; j < TX_Q_LEN; j++) {
			if (txo->sent_skb_list[j]) {
				skb = txo->sent_skb_list[j];
				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
					tcphdr = tcp_hdr(skb);
					dev_info(dev, "TCP source port %d\n",
						 ntohs(tcphdr->source));
					dev_info(dev, "TCP dest port %d\n",
						 ntohs(tcphdr->dest));
					dev_info(dev, "TCP sequence num %u\n",
						 ntohl(tcphdr->seq));
					dev_info(dev, "TCP ack_seq %u\n",
						 ntohl(tcphdr->ack_seq));
				} else if (ip_hdr(skb)->protocol ==
					   IPPROTO_UDP) {
					udphdr = udp_hdr(skb);
					dev_info(dev, "UDP source port %d\n",
						 ntohs(udphdr->source));
					dev_info(dev, "UDP dest port %d\n",
						 ntohs(udphdr->dest));
				}
				dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
					 j, skb, skb->len, skb->protocol);
			}
		}
	}

	if (lancer_chip(adapter)) {
		dev_info(dev, "Initiating reset due to tx timeout\n");
		dev_info(dev, "Resetting adapter\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status)
			dev_err(dev, "Reset failed .. Reboot server\n");
	}
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

f66b7cfd
SP
1622static void be_set_all_promisc(struct be_adapter *adapter)
1623{
1624 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1625 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1626}
1627
1628static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1629{
0fc16ebf 1630 int status;
6b7c5b94 1631
f66b7cfd
SP
1632 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1633 return;
6b7c5b94 1634
f66b7cfd
SP
1635 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1636 if (!status)
1637 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1638}
1639
92fbb1df 1640static void be_set_uc_promisc(struct be_adapter *adapter)
f66b7cfd
SP
1641{
1642 int status;
1643
92fbb1df
SB
1644 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1645 return;
1646
1647 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
f66b7cfd 1648 if (!status)
92fbb1df
SB
1649 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1650}
1651
1652static void be_clear_uc_promisc(struct be_adapter *adapter)
1653{
1654 int status;
1655
1656 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1657 return;
1658
1659 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1660 if (!status)
1661 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1662}
1663
1664/* The two functions below are the callback args for __dev_mc_sync/dev_uc_sync().
1665 * We use a single callback function for both sync and unsync. We don't really
1666 * add/remove addresses through this callback; we only use it to detect changes
1667 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1668 */
1669static int be_uc_list_update(struct net_device *netdev,
1670 const unsigned char *addr)
1671{
1672 struct be_adapter *adapter = netdev_priv(netdev);
1673
1674 adapter->update_uc_list = true;
1675 return 0;
1676}
1677
1678static int be_mc_list_update(struct net_device *netdev,
1679 const unsigned char *addr)
1680{
1681 struct be_adapter *adapter = netdev_priv(netdev);
1682
1683 adapter->update_mc_list = true;
1684 return 0;
1685}
1686
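/* Usage sketch (assumed semantics of the core helper): __dev_mc_sync()
 * walks the netdev's mc list and invokes the sync/unsync callbacks for
 * addresses added to or removed from it, e.g.
 *
 *	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
 *
 * Because both callbacks only set adapter->update_mc_list, the call reduces
 * to a "did the list change?" probe; the filters themselves are programmed
 * later from be_set_mc_list()/be_set_uc_list().
 */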
1687static void be_set_mc_list(struct be_adapter *adapter)
1688{
1689 struct net_device *netdev = adapter->netdev;
b7172414 1690 struct netdev_hw_addr *ha;
92fbb1df
SB
1691 bool mc_promisc = false;
1692 int status;
1693
b7172414 1694 netif_addr_lock_bh(netdev);
92fbb1df
SB
1695 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1696
1697 if (netdev->flags & IFF_PROMISC) {
1698 adapter->update_mc_list = false;
1699 } else if (netdev->flags & IFF_ALLMULTI ||
1700 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1701 /* Enable multicast promisc if num configured exceeds
1702 * what we support
1703 */
1704 mc_promisc = true;
1705 adapter->update_mc_list = false;
1706 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1707 /* Update mc-list unconditionally if the iface was previously
1708 * in mc-promisc mode and now is out of that mode.
1709 */
1710 adapter->update_mc_list = true;
1711 }
1712
b7172414
SP
1713 if (adapter->update_mc_list) {
1714 int i = 0;
1715
1716 /* cache the mc-list in adapter */
1717 netdev_for_each_mc_addr(ha, netdev) {
1718 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1719 i++;
1720 }
1721 adapter->mc_count = netdev_mc_count(netdev);
1722 }
1723 netif_addr_unlock_bh(netdev);
1724
92fbb1df 1725 if (mc_promisc) {
f66b7cfd 1726 be_set_mc_promisc(adapter);
92fbb1df
SB
1727 } else if (adapter->update_mc_list) {
1728 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1729 if (!status)
1730 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1731 else
1732 be_set_mc_promisc(adapter);
1733
1734 adapter->update_mc_list = false;
1735 }
1736}
1737
1738static void be_clear_mc_list(struct be_adapter *adapter)
1739{
1740 struct net_device *netdev = adapter->netdev;
1741
1742 __dev_mc_unsync(netdev, NULL);
1743 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
b7172414 1744 adapter->mc_count = 0;
f66b7cfd
SP
1745}
1746
988d44b1
SR
1747static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1748{
1d0f110a 1749 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
988d44b1
SR
1750 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1751 return 0;
1752 }
1753
1d0f110a 1754 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
988d44b1
SR
1755 adapter->if_handle,
1756 &adapter->pmac_id[uc_idx + 1], 0);
1757}
1758
1759static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1760{
1761 if (pmac_id == adapter->pmac_id[0])
1762 return;
1763
1764 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1765}
1766
f66b7cfd
SP
1767static void be_set_uc_list(struct be_adapter *adapter)
1768{
92fbb1df 1769 struct net_device *netdev = adapter->netdev;
f66b7cfd 1770 struct netdev_hw_addr *ha;
92fbb1df 1771 bool uc_promisc = false;
b7172414 1772 int curr_uc_macs = 0, i;
f66b7cfd 1773
b7172414 1774 netif_addr_lock_bh(netdev);
92fbb1df 1775 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
f66b7cfd 1776
92fbb1df
SB
1777 if (netdev->flags & IFF_PROMISC) {
1778 adapter->update_uc_list = false;
1779 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1780 uc_promisc = true;
1781 adapter->update_uc_list = false;
1782 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1783 /* Update uc-list unconditionally if the iface was previously
1784 * in uc-promisc mode and now is out of that mode.
1785 */
1786 adapter->update_uc_list = true;
6b7c5b94
SP
1787 }
1788
b7172414 1789 if (adapter->update_uc_list) {
b7172414 1790 /* cache the uc-list in adapter array */
6052cd1a 1791 i = 0;
b7172414
SP
1792 netdev_for_each_uc_addr(ha, netdev) {
1793 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1794 i++;
1795 }
1796 curr_uc_macs = netdev_uc_count(netdev);
1797 }
1798 netif_addr_unlock_bh(netdev);
1799
92fbb1df
SB
1800 if (uc_promisc) {
1801 be_set_uc_promisc(adapter);
1802 } else if (adapter->update_uc_list) {
1803 be_clear_uc_promisc(adapter);
1804
b7172414 1805 for (i = 0; i < adapter->uc_macs; i++)
988d44b1 1806 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
92fbb1df 1807
b7172414 1808 for (i = 0; i < curr_uc_macs; i++)
988d44b1 1809 be_uc_mac_add(adapter, i);
b7172414 1810 adapter->uc_macs = curr_uc_macs;
92fbb1df 1811 adapter->update_uc_list = false;
f66b7cfd
SP
1812 }
1813}
6b7c5b94 1814
f66b7cfd
SP
1815static void be_clear_uc_list(struct be_adapter *adapter)
1816{
92fbb1df 1817 struct net_device *netdev = adapter->netdev;
f66b7cfd 1818 int i;
fbc13f01 1819
92fbb1df 1820 __dev_uc_unsync(netdev, NULL);
b7172414 1821 for (i = 0; i < adapter->uc_macs; i++)
988d44b1
SR
1822 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1823
f66b7cfd
SP
1824 adapter->uc_macs = 0;
1825}
fbc13f01 1826
b7172414 1827static void __be_set_rx_mode(struct be_adapter *adapter)
f66b7cfd 1828{
b7172414
SP
1829 struct net_device *netdev = adapter->netdev;
1830
1831 mutex_lock(&adapter->rx_filter_lock);
fbc13f01 1832
f66b7cfd 1833 if (netdev->flags & IFF_PROMISC) {
92fbb1df
SB
1834 if (!be_in_all_promisc(adapter))
1835 be_set_all_promisc(adapter);
1836 } else if (be_in_all_promisc(adapter)) {
1837 /* We need to re-program the vlan-list or clear
1838 * vlan-promisc mode (if needed) when the interface
1839 * comes out of promisc mode.
1840 */
1841 be_vid_config(adapter);
f66b7cfd 1842 }
a0794885 1843
92fbb1df 1844 be_set_uc_list(adapter);
f66b7cfd 1845 be_set_mc_list(adapter);
b7172414
SP
1846
1847 mutex_unlock(&adapter->rx_filter_lock);
1848}
1849
1850static void be_work_set_rx_mode(struct work_struct *work)
1851{
1852 struct be_cmd_work *cmd_work =
1853 container_of(work, struct be_cmd_work, work);
1854
1855 __be_set_rx_mode(cmd_work->adapter);
1856 kfree(cmd_work);
6b7c5b94
SP
1857}
1858
ba343c77
SB
1859static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1860{
1861 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1862 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1863 int status;
1864
11ac75ed 1865 if (!sriov_enabled(adapter))
ba343c77
SB
1866 return -EPERM;
1867
11ac75ed 1868 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1869 return -EINVAL;
1870
3c31aaf3
VV
1871 /* Proceed further only if user provided MAC is different
1872 * from active MAC
1873 */
1874 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1875 return 0;
1876
3175d8c2
SP
1877 if (BEx_chip(adapter)) {
1878 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1879 vf + 1);
ba343c77 1880
11ac75ed
SP
1881 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1882 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1883 } else {
1884 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1885 vf + 1);
590c391d
PR
1886 }
1887
abccf23e
KA
1888 if (status) {
1889 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1890 mac, vf, status);
1891 return be_cmd_status(status);
1892 }
64600ea5 1893
abccf23e
KA
1894 ether_addr_copy(vf_cfg->mac_addr, mac);
1895
1896 return 0;
ba343c77
SB
1897}
1898
64600ea5 1899static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1900 struct ifla_vf_info *vi)
64600ea5
AK
1901{
1902 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1903 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1904
11ac75ed 1905 if (!sriov_enabled(adapter))
64600ea5
AK
1906 return -EPERM;
1907
11ac75ed 1908 if (vf >= adapter->num_vfs)
64600ea5
AK
1909 return -EINVAL;
1910
1911 vi->vf = vf;
ed616689
SC
1912 vi->max_tx_rate = vf_cfg->tx_rate;
1913 vi->min_tx_rate = 0;
a60b3a13
AK
1914 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1915 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1916 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1917 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1918 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1919
1920 return 0;
1921}
1922
435452aa
VV
1923static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1924{
1925 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1926 u16 vids[BE_NUM_VLANS_SUPPORTED];
1927 int vf_if_id = vf_cfg->if_handle;
1928 int status;
1929
1930 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1931 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1932 if (status)
1933 return status;
1934
1935 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1936 vids[0] = 0;
1937 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1938 if (!status)
1939 dev_info(&adapter->pdev->dev,
1940 "Cleared guest VLANs on VF%d", vf);
1941
1942 /* After TVT is enabled, disallow VFs to program VLAN filters */
1943 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1944 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1945 ~BE_PRIV_FILTMGMT, vf + 1);
1946 if (!status)
1947 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1948 }
1949 return 0;
1950}
1951
1952static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1953{
1954 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1955 struct device *dev = &adapter->pdev->dev;
1956 int status;
1957
1958 /* Reset Transparent VLAN Tagging. */
1959 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1960 vf_cfg->if_handle, 0, 0);
435452aa
VV
1961 if (status)
1962 return status;
1963
1964 /* Allow VFs to program VLAN filtering */
1965 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1966 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1967 BE_PRIV_FILTMGMT, vf + 1);
1968 if (!status) {
1969 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1970 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1971 }
1972 }
1973
1974 dev_info(dev,
1975 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1976 return 0;
1977}
1978
79aab093
MS
1979static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1980 __be16 vlan_proto)
1da87b7f
AK
1981{
1982 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1983 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1984 int status;
1da87b7f 1985
11ac75ed 1986 if (!sriov_enabled(adapter))
1da87b7f
AK
1987 return -EPERM;
1988
b9fc0e53 1989 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1990 return -EINVAL;
1991
79aab093
MS
1992 if (vlan_proto != htons(ETH_P_8021Q))
1993 return -EPROTONOSUPPORT;
1994
b9fc0e53
AK
1995 if (vlan || qos) {
1996 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1997 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1998 } else {
435452aa 1999 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
2000 }
2001
abccf23e
KA
2002 if (status) {
2003 dev_err(&adapter->pdev->dev,
435452aa
VV
2004 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
2005 status);
abccf23e
KA
2006 return be_cmd_status(status);
2007 }
2008
2009 vf_cfg->vlan_tag = vlan;
abccf23e 2010 return 0;
1da87b7f
AK
2011}
2012
ed616689
SC
2013static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
2014 int min_tx_rate, int max_tx_rate)
e1d18735
AK
2015{
2016 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
2017 struct device *dev = &adapter->pdev->dev;
2018 int percent_rate, status = 0;
2019 u16 link_speed = 0;
2020 u8 link_status;
e1d18735 2021
11ac75ed 2022 if (!sriov_enabled(adapter))
e1d18735
AK
2023 return -EPERM;
2024
94f434c2 2025 if (vf >= adapter->num_vfs)
e1d18735
AK
2026 return -EINVAL;
2027
ed616689
SC
2028 if (min_tx_rate)
2029 return -EINVAL;
2030
0f77ba73
RN
2031 if (!max_tx_rate)
2032 goto config_qos;
2033
2034 status = be_cmd_link_status_query(adapter, &link_speed,
2035 &link_status, 0);
2036 if (status)
2037 goto err;
2038
2039 if (!link_status) {
2040 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 2041 status = -ENETDOWN;
0f77ba73
RN
2042 goto err;
2043 }
2044
2045 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
2046 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
2047 link_speed);
2048 status = -EINVAL;
2049 goto err;
2050 }
2051
2052 /* On Skyhawk the QOS setting must be done only as a % value */
2053 percent_rate = link_speed / 100;
2054 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
2055 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
2056 percent_rate);
2057 status = -EINVAL;
2058 goto err;
94f434c2 2059 }
e1d18735 2060
0f77ba73
RN
2061config_qos:
2062 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 2063 if (status)
0f77ba73
RN
2064 goto err;
2065
2066 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2067 return 0;
2068
2069err:
2070 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2071 max_tx_rate, vf);
abccf23e 2072 return be_cmd_status(status);
e1d18735 2073}
e2fb1afa 2074
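/* Worked example for the Skyhawk %-rate check above (illustrative values):
 * with link_speed = 10000 (10Gbps), percent_rate = 10000 / 100 = 100, so
 * max_tx_rate must be a multiple of 100Mbps; requesting 2550Mbps fails with
 * -EINVAL while 2500Mbps passes the check and is handed to
 * be_cmd_config_qos().
 */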
bdce2ad7
SR
2075static int be_set_vf_link_state(struct net_device *netdev, int vf,
2076 int link_state)
2077{
2078 struct be_adapter *adapter = netdev_priv(netdev);
2079 int status;
2080
2081 if (!sriov_enabled(adapter))
2082 return -EPERM;
2083
2084 if (vf >= adapter->num_vfs)
2085 return -EINVAL;
2086
2087 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
2088 if (status) {
2089 dev_err(&adapter->pdev->dev,
2090 "Link state change on VF %d failed: %#x\n", vf, status);
2091 return be_cmd_status(status);
2092 }
bdce2ad7 2093
abccf23e
KA
2094 adapter->vf_cfg[vf].plink_tracking = link_state;
2095
2096 return 0;
bdce2ad7 2097}
e1d18735 2098
e7bcbd7b
KA
2099static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2100{
2101 struct be_adapter *adapter = netdev_priv(netdev);
2102 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2103 u8 spoofchk;
2104 int status;
2105
2106 if (!sriov_enabled(adapter))
2107 return -EPERM;
2108
2109 if (vf >= adapter->num_vfs)
2110 return -EINVAL;
2111
2112 if (BEx_chip(adapter))
2113 return -EOPNOTSUPP;
2114
2115 if (enable == vf_cfg->spoofchk)
2116 return 0;
2117
2118 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2119
2120 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2121 0, spoofchk);
2122 if (status) {
2123 dev_err(&adapter->pdev->dev,
2124 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2125 return be_cmd_status(status);
2126 }
2127
2128 vf_cfg->spoofchk = enable;
2129 return 0;
2130}
2131
2632bafd
SP
2132static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2133 ulong now)
6b7c5b94 2134{
2632bafd
SP
2135 aic->rx_pkts_prev = rx_pkts;
2136 aic->tx_reqs_prev = tx_pkts;
2137 aic->jiffies = now;
2138}
ac124ff9 2139
20947770 2140static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 2141{
20947770
PR
2142 struct be_adapter *adapter = eqo->adapter;
2143 int eqd, start;
2632bafd 2144 struct be_aic_obj *aic;
2632bafd
SP
2145 struct be_rx_obj *rxo;
2146 struct be_tx_obj *txo;
20947770 2147 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
2148 ulong now;
2149 u32 pps, delta;
20947770 2150 int i;
10ef9ab4 2151
20947770
PR
2152 aic = &adapter->aic_obj[eqo->idx];
2153 if (!aic->enable) {
2154 if (aic->jiffies)
2155 aic->jiffies = 0;
2156 eqd = aic->et_eqd;
2157 return eqd;
2158 }
6b7c5b94 2159
20947770 2160 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 2161 do {
57a7744e 2162 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 2163 rx_pkts += rxo->stats.rx_pkts;
57a7744e 2164 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 2165 }
10ef9ab4 2166
20947770 2167 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 2168 do {
57a7744e 2169 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 2170 tx_pkts += txo->stats.tx_reqs;
57a7744e 2171 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 2172 }
6b7c5b94 2173
20947770
PR
2174 /* Skip if the counters wrapped around or this is the first calculation */
2175 now = jiffies;
2176 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2177 rx_pkts < aic->rx_pkts_prev ||
2178 tx_pkts < aic->tx_reqs_prev) {
2179 be_aic_update(aic, rx_pkts, tx_pkts, now);
2180 return aic->prev_eqd;
2181 }
2632bafd 2182
20947770
PR
2183 delta = jiffies_to_msecs(now - aic->jiffies);
2184 if (delta == 0)
2185 return aic->prev_eqd;
10ef9ab4 2186
20947770
PR
2187 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2188 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2189 eqd = (pps / 15000) << 2;
2632bafd 2190
20947770
PR
2191 if (eqd < 8)
2192 eqd = 0;
2193 eqd = min_t(u32, eqd, aic->max_eqd);
2194 eqd = max_t(u32, eqd, aic->min_eqd);
2195
2196 be_aic_update(aic, rx_pkts, tx_pkts, now);
2197
2198 return eqd;
2199}
2200
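/* Worked example for the EQ-delay math above (illustrative traffic rates):
 * if 60000 rx_pkts and 30000 tx_reqs accumulated over delta = 1000ms, then
 * pps = 60000 + 30000 = 90000 and eqd = (90000 / 15000) << 2 = 24. Results
 * below 8 snap to 0 (no interrupt delay), and the value is clamped to the
 * [aic->min_eqd, aic->max_eqd] window before being returned.
 */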
2201/* For Skyhawk-R only */
2202static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2203{
2204 struct be_adapter *adapter = eqo->adapter;
2205 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2206 ulong now = jiffies;
2207 int eqd;
2208 u32 mult_enc;
2209
2210 if (!aic->enable)
2211 return 0;
2212
3c0d49aa 2213 if (jiffies_to_msecs(now - aic->jiffies) < 1)
20947770
PR
2214 eqd = aic->prev_eqd;
2215 else
2216 eqd = be_get_new_eqd(eqo);
2217
2218 if (eqd > 100)
2219 mult_enc = R2I_DLY_ENC_1;
2220 else if (eqd > 60)
2221 mult_enc = R2I_DLY_ENC_2;
2222 else if (eqd > 20)
2223 mult_enc = R2I_DLY_ENC_3;
2224 else
2225 mult_enc = R2I_DLY_ENC_0;
2226
2227 aic->prev_eqd = eqd;
2228
2229 return mult_enc;
2230}
2231
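/* The thresholds above bucket the computed eqd into the four Skyhawk R2I
 * delay encodings; e.g. (illustrative inputs): eqd = 120 -> R2I_DLY_ENC_1,
 * eqd = 80 -> R2I_DLY_ENC_2, eqd = 40 -> R2I_DLY_ENC_3 and eqd = 10 ->
 * R2I_DLY_ENC_0 (no delay).
 */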
2232void be_eqd_update(struct be_adapter *adapter, bool force_update)
2233{
2234 struct be_set_eqd set_eqd[MAX_EVT_QS];
2235 struct be_aic_obj *aic;
2236 struct be_eq_obj *eqo;
2237 int i, num = 0, eqd;
2238
2239 for_all_evt_queues(adapter, eqo, i) {
2240 aic = &adapter->aic_obj[eqo->idx];
2241 eqd = be_get_new_eqd(eqo);
2242 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
2243 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2244 set_eqd[num].eq_id = eqo->q.id;
2245 aic->prev_eqd = eqd;
2246 num++;
2247 }
ac124ff9 2248 }
2632bafd
SP
2249
2250 if (num)
2251 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
2252}
2253
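/* Arithmetic note for the conversion above (example value): the delay is
 * passed to be_cmd_modify_eqd() as a multiplier, delay_multiplier =
 * (eqd * 65) / 100, so an eqd of 24 is programmed as (24 * 65) / 100 = 15.
 */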
3abcdeda 2254static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 2255 struct be_rx_compl_info *rxcp)
4097f663 2256{
ac124ff9 2257 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 2258
ab1594e9 2259 u64_stats_update_begin(&stats->sync);
3abcdeda 2260 stats->rx_compl++;
2e588f84 2261 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 2262 stats->rx_pkts++;
8670f2a5
SB
2263 if (rxcp->tunneled)
2264 stats->rx_vxlan_offload_pkts++;
2e588f84 2265 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 2266 stats->rx_mcast_pkts++;
2e588f84 2267 if (rxcp->err)
ac124ff9 2268 stats->rx_compl_err++;
ab1594e9 2269 u64_stats_update_end(&stats->sync);
4097f663
SP
2270}
2271
2e588f84 2272static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 2273{
19fad86f 2274 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
2275 * Also ignore ipcksm for ipv6 pkts
2276 */
2e588f84 2277 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 2278 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
2279}
2280
0b0ef1d0 2281static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 2282{
10ef9ab4 2283 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2284 struct be_rx_page_info *rx_page_info;
3abcdeda 2285 struct be_queue_info *rxq = &rxo->q;
b0fd2eb2 2286 u32 frag_idx = rxq->tail;
6b7c5b94 2287
3abcdeda 2288 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
2289 BUG_ON(!rx_page_info->page);
2290
e50287be 2291 if (rx_page_info->last_frag) {
2b7bcebf
IV
2292 dma_unmap_page(&adapter->pdev->dev,
2293 dma_unmap_addr(rx_page_info, bus),
2294 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2295 rx_page_info->last_frag = false;
2296 } else {
2297 dma_sync_single_for_cpu(&adapter->pdev->dev,
2298 dma_unmap_addr(rx_page_info, bus),
2299 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2300 }
6b7c5b94 2301
0b0ef1d0 2302 queue_tail_inc(rxq);
6b7c5b94
SP
2303 atomic_dec(&rxq->used);
2304 return rx_page_info;
2305}
2306
2307/* Throw away the data in the Rx completion */
10ef9ab4
SP
2308static void be_rx_compl_discard(struct be_rx_obj *rxo,
2309 struct be_rx_compl_info *rxcp)
6b7c5b94 2310{
6b7c5b94 2311 struct be_rx_page_info *page_info;
2e588f84 2312 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2313
e80d9da6 2314 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2315 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2316 put_page(page_info->page);
2317 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2318 }
2319}
2320
2321/*
2322 * skb_fill_rx_data forms a complete skb for an ether frame
2323 * indicated by rxcp.
2324 */
10ef9ab4
SP
2325static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2326 struct be_rx_compl_info *rxcp)
6b7c5b94 2327{
6b7c5b94 2328 struct be_rx_page_info *page_info;
2e588f84
SP
2329 u16 i, j;
2330 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2331 u8 *start;
6b7c5b94 2332
0b0ef1d0 2333 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2334 start = page_address(page_info->page) + page_info->page_offset;
2335 prefetch(start);
2336
2337 /* Copy data in the first descriptor of this completion */
2e588f84 2338 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2339
6b7c5b94
SP
2340 skb->len = curr_frag_len;
2341 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2342 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2343 /* Complete packet has now been moved to data */
2344 put_page(page_info->page);
2345 skb->data_len = 0;
2346 skb->tail += curr_frag_len;
2347 } else {
ac1ae5f3
ED
2348 hdr_len = ETH_HLEN;
2349 memcpy(skb->data, start, hdr_len);
6b7c5b94 2350 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2351 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2352 skb_shinfo(skb)->frags[0].page_offset =
2353 page_info->page_offset + hdr_len;
748b539a
SP
2354 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2355 curr_frag_len - hdr_len);
6b7c5b94 2356 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2357 skb->truesize += rx_frag_size;
6b7c5b94
SP
2358 skb->tail += hdr_len;
2359 }
205859a2 2360 page_info->page = NULL;
6b7c5b94 2361
2e588f84
SP
2362 if (rxcp->pkt_size <= rx_frag_size) {
2363 BUG_ON(rxcp->num_rcvd != 1);
2364 return;
6b7c5b94
SP
2365 }
2366
2367 /* More frags present for this completion */
2e588f84
SP
2368 remaining = rxcp->pkt_size - curr_frag_len;
2369 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2370 page_info = get_rx_page_info(rxo);
2e588f84 2371 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2372
bd46cb6c
AK
2373 /* Coalesce all frags from the same physical page in one slot */
2374 if (page_info->page_offset == 0) {
2375 /* Fresh page */
2376 j++;
b061b39e 2377 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2378 skb_shinfo(skb)->frags[j].page_offset =
2379 page_info->page_offset;
9e903e08 2380 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2381 skb_shinfo(skb)->nr_frags++;
2382 } else {
2383 put_page(page_info->page);
2384 }
2385
9e903e08 2386 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2387 skb->len += curr_frag_len;
2388 skb->data_len += curr_frag_len;
bdb28a97 2389 skb->truesize += rx_frag_size;
2e588f84 2390 remaining -= curr_frag_len;
205859a2 2391 page_info->page = NULL;
6b7c5b94 2392 }
bd46cb6c 2393 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2394}
2395
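/* Frag-coalescing example for the loop above (assuming the default
 * rx_frag_size = 2048 and 4K pages): a 6000-byte frame arrives as three HW
 * frags; frag 1 shares frag 0's physical page (page_offset != 0), so it is
 * folded into the same skb frag slot via skb_frag_size_add() and its page
 * ref dropped, leaving the skb with two frag slots instead of three.
 */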
5be93b9a 2396/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2397static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2398 struct be_rx_compl_info *rxcp)
6b7c5b94 2399{
10ef9ab4 2400 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2401 struct net_device *netdev = adapter->netdev;
6b7c5b94 2402 struct sk_buff *skb;
89420424 2403
bb349bb4 2404 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2405 if (unlikely(!skb)) {
ac124ff9 2406 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2407 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2408 return;
2409 }
2410
10ef9ab4 2411 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2412
6332c8d3 2413 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2414 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2415 else
2416 skb_checksum_none_assert(skb);
6b7c5b94 2417
6332c8d3 2418 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2419 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2420 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2421 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2422
b6c0e89d 2423 skb->csum_level = rxcp->tunneled;
6384a4d0 2424 skb_mark_napi_id(skb, napi);
6b7c5b94 2425
343e43c0 2426 if (rxcp->vlanf)
86a9bad3 2427 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2428
2429 netif_receive_skb(skb);
6b7c5b94
SP
2430}
2431
5be93b9a 2432/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2433static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2434 struct napi_struct *napi,
2435 struct be_rx_compl_info *rxcp)
6b7c5b94 2436{
10ef9ab4 2437 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2438 struct be_rx_page_info *page_info;
5be93b9a 2439 struct sk_buff *skb = NULL;
2e588f84
SP
2440 u16 remaining, curr_frag_len;
2441 u16 i, j;
3968fa1e 2442
10ef9ab4 2443 skb = napi_get_frags(napi);
5be93b9a 2444 if (!skb) {
10ef9ab4 2445 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2446 return;
2447 }
2448
2e588f84
SP
2449 remaining = rxcp->pkt_size;
2450 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2451 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2452
2453 curr_frag_len = min(remaining, rx_frag_size);
2454
bd46cb6c
AK
2455 /* Coalesce all frags from the same physical page in one slot */
2456 if (i == 0 || page_info->page_offset == 0) {
2457 /* First frag or Fresh page */
2458 j++;
b061b39e 2459 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2460 skb_shinfo(skb)->frags[j].page_offset =
2461 page_info->page_offset;
9e903e08 2462 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2463 } else {
2464 put_page(page_info->page);
2465 }
9e903e08 2466 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2467 skb->truesize += rx_frag_size;
bd46cb6c 2468 remaining -= curr_frag_len;
6b7c5b94
SP
2469 memset(page_info, 0, sizeof(*page_info));
2470 }
bd46cb6c 2471 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2472
5be93b9a 2473 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2474 skb->len = rxcp->pkt_size;
2475 skb->data_len = rxcp->pkt_size;
5be93b9a 2476 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2477 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2478 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2479 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2480
b6c0e89d 2481 skb->csum_level = rxcp->tunneled;
5be93b9a 2482
343e43c0 2483 if (rxcp->vlanf)
86a9bad3 2484 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2485
10ef9ab4 2486 napi_gro_frags(napi);
2e588f84
SP
2487}
2488
10ef9ab4
SP
2489static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2490 struct be_rx_compl_info *rxcp)
2e588f84 2491{
c3c18bc1
SP
2492 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2493 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2494 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2495 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2496 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2497 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2498 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2499 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2500 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2501 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2502 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2503 if (rxcp->vlanf) {
c3c18bc1
SP
2504 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2505 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2506 }
c3c18bc1 2507 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2508 rxcp->tunneled =
c3c18bc1 2509 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2510}
2511
10ef9ab4
SP
2512static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2513 struct be_rx_compl_info *rxcp)
2e588f84 2514{
c3c18bc1
SP
2515 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2516 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2517 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2518 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2519 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2520 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2521 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2522 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2523 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2524 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2525 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2526 if (rxcp->vlanf) {
c3c18bc1
SP
2527 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2528 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2529 }
c3c18bc1
SP
2530 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2531 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2532}
2533
2534static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2535{
2536 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2537 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2538 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2539
2e588f84
SP
2540 /* For checking the valid bit, it is OK to use either definition as the
2541 * valid bit is at the same position in both the v0 and v1 Rx compl */
2542 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2543 return NULL;
6b7c5b94 2544
2e588f84
SP
2545 rmb();
2546 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2547
2e588f84 2548 if (adapter->be3_native)
10ef9ab4 2549 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2550 else
10ef9ab4 2551 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2552
e38b1706
SK
2553 if (rxcp->ip_frag)
2554 rxcp->l4_csum = 0;
2555
15d72184 2556 if (rxcp->vlanf) {
f93f160b
VV
2557 /* In QNQ modes, if qnq bit is not set, then the packet was
2558 * tagged only with the transparent outer vlan-tag and must
2559 * not be treated as a vlan packet by host
2560 */
2561 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2562 rxcp->vlanf = 0;
6b7c5b94 2563
15d72184 2564 if (!lancer_chip(adapter))
3c709f8f 2565 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2566
939cf306 2567 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2568 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2569 rxcp->vlanf = 0;
2570 }
2e588f84
SP
2571
2572 /* As the compl has been parsed, reset it; we won't touch it again */
2573 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2574
3abcdeda 2575 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2576 return rxcp;
2577}
2578
1829b086 2579static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2580{
6b7c5b94 2581 u32 order = get_order(size);
1829b086 2582
6b7c5b94 2583 if (order > 0)
1829b086
ED
2584 gfp |= __GFP_COMP;
2585 return alloc_pages(gfp, order);
6b7c5b94
SP
2586}
2587
2588/*
2589 * Allocate a page, split it to fragments of size rx_frag_size and post as
2590 * receive buffers to BE
2591 */
c30d7266 2592static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2593{
3abcdeda 2594 struct be_adapter *adapter = rxo->adapter;
26d92f92 2595 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2596 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2597 struct page *pagep = NULL;
ba42fad0 2598 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2599 struct be_eth_rx_d *rxd;
2600 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2601 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2602
3abcdeda 2603 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2604 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2605 if (!pagep) {
1829b086 2606 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2607 if (unlikely(!pagep)) {
ac124ff9 2608 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2609 break;
2610 }
ba42fad0
IV
2611 page_dmaaddr = dma_map_page(dev, pagep, 0,
2612 adapter->big_page_size,
2b7bcebf 2613 DMA_FROM_DEVICE);
ba42fad0
IV
2614 if (dma_mapping_error(dev, page_dmaaddr)) {
2615 put_page(pagep);
2616 pagep = NULL;
d3de1540 2617 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2618 break;
2619 }
e50287be 2620 page_offset = 0;
6b7c5b94
SP
2621 } else {
2622 get_page(pagep);
e50287be 2623 page_offset += rx_frag_size;
6b7c5b94 2624 }
e50287be 2625 page_info->page_offset = page_offset;
6b7c5b94 2626 page_info->page = pagep;
6b7c5b94
SP
2627
2628 rxd = queue_head_node(rxq);
e50287be 2629 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2630 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2631 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2632
2633 /* Any space left in the current big page for another frag? */
2634 if ((page_offset + rx_frag_size + rx_frag_size) >
2635 adapter->big_page_size) {
2636 pagep = NULL;
e50287be
SP
2637 page_info->last_frag = true;
2638 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2639 } else {
2640 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2641 }
26d92f92
SP
2642
2643 prev_page_info = page_info;
2644 queue_head_inc(rxq);
10ef9ab4 2645 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2646 }
e50287be
SP
2647
2648 /* Mark the last frag of a page when we break out of the above loop
2649 * with no more slots available in the RXQ
2650 */
2651 if (pagep) {
2652 prev_page_info->last_frag = true;
2653 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2654 }
6b7c5b94
SP
2655
2656 if (posted) {
6b7c5b94 2657 atomic_add(posted, &rxq->used);
6384a4d0
SP
2658 if (rxo->rx_post_starved)
2659 rxo->rx_post_starved = false;
c30d7266 2660 do {
69304cc9 2661 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2662 be_rxq_notify(adapter, rxq->id, notify);
2663 posted -= notify;
2664 } while (posted);
ea1dae11
SP
2665 } else if (atomic_read(&rxq->used) == 0) {
2666 /* Let be_worker replenish when memory is available */
3abcdeda 2667 rxo->rx_post_starved = true;
6b7c5b94 2668 }
6b7c5b94
SP
2669}
2670
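/* Sizing example for the posting logic above (assuming 4K PAGE_SIZE): with
 * the default rx_frag_size = 2048, big_page_size is computed elsewhere as
 * (1 << get_order(2048)) * 4096 = 4096, so each page is carved into two
 * 2048-byte RX frags before a fresh page is allocated; the posted count is
 * then rung to HW in chunks of at most MAX_NUM_POST_ERX_DB via
 * be_rxq_notify().
 */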
ffc39620
SR
2671static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2672{
2673 switch (status) {
2674 case BE_TX_COMP_HDR_PARSE_ERR:
2675 tx_stats(txo)->tx_hdr_parse_err++;
2676 break;
2677 case BE_TX_COMP_NDMA_ERR:
2678 tx_stats(txo)->tx_dma_err++;
2679 break;
2680 case BE_TX_COMP_ACL_ERR:
2681 tx_stats(txo)->tx_spoof_check_err++;
2682 break;
2683 }
2684}
2685
2686static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2687{
2688 switch (status) {
2689 case LANCER_TX_COMP_LSO_ERR:
2690 tx_stats(txo)->tx_tso_err++;
2691 break;
2692 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2693 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2694 tx_stats(txo)->tx_spoof_check_err++;
2695 break;
2696 case LANCER_TX_COMP_QINQ_ERR:
2697 tx_stats(txo)->tx_qinq_err++;
2698 break;
2699 case LANCER_TX_COMP_PARITY_ERR:
2700 tx_stats(txo)->tx_internal_parity_err++;
2701 break;
2702 case LANCER_TX_COMP_DMA_ERR:
2703 tx_stats(txo)->tx_dma_err++;
2704 break;
2705 case LANCER_TX_COMP_SGE_ERR:
2706 tx_stats(txo)->tx_sge_err++;
2707 break;
2708 }
2709}
2710
2711static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2712 struct be_tx_obj *txo)
6b7c5b94 2713{
152ffe5b
SB
2714 struct be_queue_info *tx_cq = &txo->cq;
2715 struct be_tx_compl_info *txcp = &txo->txcp;
2716 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2717
152ffe5b 2718 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2719 return NULL;
2720
152ffe5b 2721 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2722 rmb();
152ffe5b 2723 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2724
152ffe5b
SB
2725 txcp->status = GET_TX_COMPL_BITS(status, compl);
2726 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2727
ffc39620
SR
2728 if (txcp->status) {
2729 if (lancer_chip(adapter)) {
2730 lancer_update_tx_err(txo, txcp->status);
2731 /* Reset the adapter in case of TSO,
2732 * SGE or parity errors
2733 */
2734 if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2735 txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2736 txcp->status == LANCER_TX_COMP_SGE_ERR)
2737 be_set_error(adapter, BE_ERROR_TX);
2738 } else {
2739 be_update_tx_err(txo, txcp->status);
2740 }
2741 }
2742
2743 if (be_check_error(adapter, BE_ERROR_TX))
2744 return NULL;
2745
152ffe5b 2746 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2747 queue_tail_inc(tx_cq);
2748 return txcp;
2749}
2750
3c8def97 2751static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2752 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2753{
5f07b3c5 2754 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2755 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2756 struct sk_buff *skb = NULL;
2757 bool unmap_skb_hdr = false;
a73b796e 2758 struct be_eth_wrb *wrb;
b0fd2eb2 2759 u16 num_wrbs = 0;
2760 u32 frag_index;
6b7c5b94 2761
ec43b1a6 2762 do {
5f07b3c5
SP
2763 if (sent_skbs[txq->tail]) {
2764 /* Free skb from prev req */
2765 if (skb)
2766 dev_consume_skb_any(skb);
2767 skb = sent_skbs[txq->tail];
2768 sent_skbs[txq->tail] = NULL;
2769 queue_tail_inc(txq); /* skip hdr wrb */
2770 num_wrbs++;
2771 unmap_skb_hdr = true;
2772 }
a73b796e 2773 wrb = queue_tail_node(txq);
5f07b3c5 2774 frag_index = txq->tail;
2b7bcebf 2775 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2776 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2777 unmap_skb_hdr = false;
6b7c5b94 2778 queue_tail_inc(txq);
5f07b3c5
SP
2779 num_wrbs++;
2780 } while (frag_index != last_index);
2781 dev_consume_skb_any(skb);
6b7c5b94 2782
4d586b82 2783 return num_wrbs;
6b7c5b94
SP
2784}
2785
10ef9ab4
SP
2786/* Return the number of events in the event queue */
2787static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2788{
10ef9ab4
SP
2789 struct be_eq_entry *eqe;
2790 int num = 0;
859b1e4e 2791
10ef9ab4
SP
2792 do {
2793 eqe = queue_tail_node(&eqo->q);
2794 if (eqe->evt == 0)
2795 break;
859b1e4e 2796
10ef9ab4
SP
2797 rmb();
2798 eqe->evt = 0;
2799 num++;
2800 queue_tail_inc(&eqo->q);
2801 } while (true);
2802
2803 return num;
859b1e4e
SP
2804}
2805
10ef9ab4
SP
2806/* Leaves the EQ in disarmed state */
2807static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2808{
10ef9ab4 2809 int num = events_get(eqo);
859b1e4e 2810
20947770 2811 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2812}
2813
99b44304
KA
2814/* Free posted rx buffers that were not used */
2815static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2816{
3abcdeda 2817 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2818 struct be_rx_page_info *page_info;
2819
2820 while (atomic_read(&rxq->used) > 0) {
2821 page_info = get_rx_page_info(rxo);
2822 put_page(page_info->page);
2823 memset(page_info, 0, sizeof(*page_info));
2824 }
2825 BUG_ON(atomic_read(&rxq->used));
2826 rxq->tail = 0;
2827 rxq->head = 0;
2828}
2829
2830static void be_rx_cq_clean(struct be_rx_obj *rxo)
2831{
3abcdeda 2832 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2833 struct be_rx_compl_info *rxcp;
d23e946c
SP
2834 struct be_adapter *adapter = rxo->adapter;
2835 int flush_wait = 0;
6b7c5b94 2836
d23e946c
SP
2837 /* Consume pending rx completions.
2838 * Wait for the flush completion (identified by zero num_rcvd)
2839 * to arrive. Notify CQ even when there are no more CQ entries
2840 * for HW to flush partially coalesced CQ entries.
2841 * In Lancer, there is no need to wait for flush compl.
2842 */
2843 for (;;) {
2844 rxcp = be_rx_compl_get(rxo);
ddf1169f 2845 if (!rxcp) {
d23e946c
SP
2846 if (lancer_chip(adapter))
2847 break;
2848
954f6825
VD
2849 if (flush_wait++ > 50 ||
2850 be_check_error(adapter,
2851 BE_ERROR_HW)) {
d23e946c
SP
2852 dev_warn(&adapter->pdev->dev,
2853 "did not receive flush compl\n");
2854 break;
2855 }
2856 be_cq_notify(adapter, rx_cq->id, true, 0);
2857 mdelay(1);
2858 } else {
2859 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2860 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2861 if (rxcp->num_rcvd == 0)
2862 break;
2863 }
6b7c5b94
SP
2864 }
2865
d23e946c
SP
2866 /* After cleanup, leave the CQ in unarmed state */
2867 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2868}
2869
0ae57bb3 2870static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2871{
5f07b3c5 2872 struct device *dev = &adapter->pdev->dev;
b0fd2eb2 2873 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
152ffe5b 2874 struct be_tx_compl_info *txcp;
0ae57bb3 2875 struct be_queue_info *txq;
b0fd2eb2 2876 u32 end_idx, notified_idx;
152ffe5b 2877 struct be_tx_obj *txo;
0ae57bb3 2878 int i, pending_txqs;
a8e9179a 2879
1a3d0717 2880 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2881 do {
0ae57bb3
SP
2882 pending_txqs = adapter->num_tx_qs;
2883
2884 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2885 cmpl = 0;
2886 num_wrbs = 0;
0ae57bb3 2887 txq = &txo->q;
ffc39620 2888 while ((txcp = be_tx_compl_get(adapter, txo))) {
152ffe5b
SB
2889 num_wrbs +=
2890 be_tx_compl_process(adapter, txo,
2891 txcp->end_index);
0ae57bb3
SP
2892 cmpl++;
2893 }
2894 if (cmpl) {
2895 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2896 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2897 timeo = 0;
0ae57bb3 2898 }
cf5671e6 2899 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2900 pending_txqs--;
a8e9179a
SP
2901 }
2902
954f6825
VD
2903 if (pending_txqs == 0 || ++timeo > 10 ||
2904 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2905 break;
2906
2907 mdelay(1);
2908 } while (true);
2909
5f07b3c5 2910 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2911 for_all_tx_queues(adapter, txo, i) {
2912 txq = &txo->q;
0ae57bb3 2913
5f07b3c5
SP
2914 if (atomic_read(&txq->used)) {
2915 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2916 i, atomic_read(&txq->used));
2917 notified_idx = txq->tail;
0ae57bb3 2918 end_idx = txq->tail;
5f07b3c5
SP
2919 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2920 txq->len);
2921 /* Use the tx-compl process logic to handle requests
2922 * that were not sent to the HW.
2923 */
0ae57bb3
SP
2924 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2925 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2926 BUG_ON(atomic_read(&txq->used));
2927 txo->pend_wrb_cnt = 0;
2928 /* Since hw was never notified of these requests,
2929 * reset TXQ indices
2930 */
2931 txq->head = notified_idx;
2932 txq->tail = notified_idx;
0ae57bb3 2933 }
b03388d6 2934 }
6b7c5b94
SP
2935}
2936
10ef9ab4
SP
2937static void be_evt_queues_destroy(struct be_adapter *adapter)
2938{
2939 struct be_eq_obj *eqo;
2940 int i;
2941
2942 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2943 if (eqo->q.created) {
2944 be_eq_clean(eqo);
10ef9ab4 2945 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 2946 netif_napi_del(&eqo->napi);
649886a3 2947 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2948 }
10ef9ab4
SP
2949 be_queue_free(adapter, &eqo->q);
2950 }
2951}
2952
2953static int be_evt_queues_create(struct be_adapter *adapter)
2954{
2955 struct be_queue_info *eq;
2956 struct be_eq_obj *eqo;
2632bafd 2957 struct be_aic_obj *aic;
10ef9ab4
SP
2958 int i, rc;
2959
e261768e 2960 /* need enough EQs to service both RX and TX queues */
92bf14ab 2961 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
e261768e
SP
2962 max(adapter->cfg_num_rx_irqs,
2963 adapter->cfg_num_tx_irqs));
10ef9ab4
SP
2964
2965 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2966 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2967
2632bafd 2968 aic = &adapter->aic_obj[i];
10ef9ab4 2969 eqo->adapter = adapter;
10ef9ab4 2970 eqo->idx = i;
2632bafd
SP
2971 aic->max_eqd = BE_MAX_EQD;
2972 aic->enable = true;
10ef9ab4
SP
2973
2974 eq = &eqo->q;
2975 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2976 sizeof(struct be_eq_entry));
10ef9ab4
SP
2977 if (rc)
2978 return rc;
2979
f2f781a7 2980 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2981 if (rc)
2982 return rc;
649886a3
KA
2983
2984 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2985 return -ENOMEM;
2986 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2987 eqo->affinity_mask);
2988 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2989 BE_NAPI_WEIGHT);
10ef9ab4 2990 }
1cfafab9 2991 return 0;
10ef9ab4
SP
2992}
2993
5fb379ee
SP
2994static void be_mcc_queues_destroy(struct be_adapter *adapter)
2995{
2996 struct be_queue_info *q;
5fb379ee 2997
8788fdc2 2998 q = &adapter->mcc_obj.q;
5fb379ee 2999 if (q->created)
8788fdc2 3000 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
3001 be_queue_free(adapter, q);
3002
8788fdc2 3003 q = &adapter->mcc_obj.cq;
5fb379ee 3004 if (q->created)
8788fdc2 3005 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
3006 be_queue_free(adapter, q);
3007}
3008
3009/* Must be called only after TX qs are created as MCC shares TX EQ */
3010static int be_mcc_queues_create(struct be_adapter *adapter)
3011{
3012 struct be_queue_info *q, *cq;
5fb379ee 3013
8788fdc2 3014 cq = &adapter->mcc_obj.cq;
5fb379ee 3015 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 3016 sizeof(struct be_mcc_compl)))
5fb379ee
SP
3017 goto err;
3018
10ef9ab4
SP
3019 /* Use the default EQ for MCC completions */
3020 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
3021 goto mcc_cq_free;
3022
8788fdc2 3023 q = &adapter->mcc_obj.q;
5fb379ee
SP
3024 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3025 goto mcc_cq_destroy;
3026
8788fdc2 3027 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
3028 goto mcc_q_free;
3029
3030 return 0;
3031
3032mcc_q_free:
3033 be_queue_free(adapter, q);
3034mcc_cq_destroy:
8788fdc2 3035 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
3036mcc_cq_free:
3037 be_queue_free(adapter, cq);
3038err:
3039 return -1;
3040}
3041
6b7c5b94
SP
3042static void be_tx_queues_destroy(struct be_adapter *adapter)
3043{
3044 struct be_queue_info *q;
3c8def97
SP
3045 struct be_tx_obj *txo;
3046 u8 i;
6b7c5b94 3047
3c8def97
SP
3048 for_all_tx_queues(adapter, txo, i) {
3049 q = &txo->q;
3050 if (q->created)
3051 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3052 be_queue_free(adapter, q);
6b7c5b94 3053
3c8def97
SP
3054 q = &txo->cq;
3055 if (q->created)
3056 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3057 be_queue_free(adapter, q);
3058 }
6b7c5b94
SP
3059}
3060
7707133c 3061static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 3062{
73f394e6 3063 struct be_queue_info *cq;
3c8def97 3064 struct be_tx_obj *txo;
73f394e6 3065 struct be_eq_obj *eqo;
92bf14ab 3066 int status, i;
6b7c5b94 3067
e261768e 3068 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
dafc0fe3 3069
10ef9ab4
SP
3070 for_all_tx_queues(adapter, txo, i) {
3071 cq = &txo->cq;
3072 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
3073 sizeof(struct be_eth_tx_compl));
3074 if (status)
3075 return status;
3c8def97 3076
827da44c
JS
3077 u64_stats_init(&txo->stats.sync);
3078 u64_stats_init(&txo->stats.sync_compl);
3079
10ef9ab4
SP
3080 /* If num_evt_qs is less than num_tx_qs, then more than
3081 * one txq shares an eq
3082 */
73f394e6
SP
3083 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
3084 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
3085 if (status)
3086 return status;
6b7c5b94 3087
10ef9ab4
SP
3088 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3089 sizeof(struct be_eth_wrb));
3090 if (status)
3091 return status;
6b7c5b94 3092
94d73aaa 3093 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
3094 if (status)
3095 return status;
73f394e6
SP
3096
3097 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3098 eqo->idx);
3c8def97 3099 }
6b7c5b94 3100
d379142b
SP
3101 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3102 adapter->num_tx_qs);
10ef9ab4 3103 return 0;
6b7c5b94
SP
3104}
3105
10ef9ab4 3106static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
3107{
3108 struct be_queue_info *q;
3abcdeda
SP
3109 struct be_rx_obj *rxo;
3110 int i;
3111
3112 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
3113 q = &rxo->cq;
3114 if (q->created)
3115 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3116 be_queue_free(adapter, q);
ac6a0c4a
SP
3117 }
3118}
3119
10ef9ab4 3120static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 3121{
10ef9ab4 3122 struct be_queue_info *eq, *cq;
3abcdeda
SP
3123 struct be_rx_obj *rxo;
3124 int rc, i;
6b7c5b94 3125
e261768e
SP
3126 adapter->num_rss_qs =
3127 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
92bf14ab 3128
71bb8bd0 3129 /* We'll use RSS only if at least 2 RSS rings are supported. */
e261768e 3130 if (adapter->num_rss_qs < 2)
71bb8bd0
VV
3131 adapter->num_rss_qs = 0;
3132
3133 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3134
3135 /* When the interface is not capable of RSS rings (and there is no
3136 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 3137 */
71bb8bd0
VV
3138 if (adapter->num_rx_qs == 0)
3139 adapter->num_rx_qs = 1;
92bf14ab 3140
6b7c5b94 3141 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
3142 for_all_rx_queues(adapter, rxo, i) {
3143 rxo->adapter = adapter;
3abcdeda
SP
3144 cq = &rxo->cq;
3145 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 3146 sizeof(struct be_eth_rx_compl));
3abcdeda 3147 if (rc)
10ef9ab4 3148 return rc;
3abcdeda 3149
827da44c 3150 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
3151 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3152 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 3153 if (rc)
10ef9ab4 3154 return rc;
3abcdeda 3155 }
6b7c5b94 3156
d379142b 3157 dev_info(&adapter->pdev->dev,
71bb8bd0 3158 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 3159 return 0;
b628bde2
SP
3160}
3161
6b7c5b94
SP
3162static irqreturn_t be_intx(int irq, void *dev)
3163{
e49cc34f
SP
3164 struct be_eq_obj *eqo = dev;
3165 struct be_adapter *adapter = eqo->adapter;
3166 int num_evts = 0;
6b7c5b94 3167
d0b9cec3
SP
3168 /* IRQ is not expected when NAPI is scheduled as the EQ
3169 * will not be armed.
3170 * But this can happen on Lancer INTx where it takes
3171 * a while to de-assert INTx, or in BE2 where occasionally
3172 * an interrupt may be raised even when the EQ is unarmed.
3173 * If NAPI is already scheduled, then counting & notifying
3174 * events will orphan them.
e49cc34f 3175 */
d0b9cec3 3176 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 3177 num_evts = events_get(eqo);
d0b9cec3
SP
3178 __napi_schedule(&eqo->napi);
3179 if (num_evts)
3180 eqo->spurious_intr = 0;
3181 }
20947770 3182 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 3183
d0b9cec3
SP
3184 /* Return IRQ_HANDLED only for the first spurious intr
3185 * after a valid intr to stop the kernel from branding
3186 * this irq as a bad one!
e49cc34f 3187 */
d0b9cec3
SP
3188 if (num_evts || eqo->spurious_intr++ == 0)
3189 return IRQ_HANDLED;
3190 else
3191 return IRQ_NONE;
6b7c5b94
SP
3192}
3193
10ef9ab4 3194static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 3195{
10ef9ab4 3196 struct be_eq_obj *eqo = dev;
6b7c5b94 3197
20947770 3198 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 3199 napi_schedule(&eqo->napi);
6b7c5b94
SP
3200 return IRQ_HANDLED;
3201}
3202
2e588f84 3203static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 3204{
e38b1706 3205 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
3206}
3207
10ef9ab4 3208static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
fb6113e6 3209 int budget)
6b7c5b94 3210{
3abcdeda
SP
3211 struct be_adapter *adapter = rxo->adapter;
3212 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 3213 struct be_rx_compl_info *rxcp;
6b7c5b94 3214 u32 work_done;
c30d7266 3215 u32 frags_consumed = 0;
6b7c5b94
SP
3216
3217 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 3218 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
3219 if (!rxcp)
3220 break;
3221
12004ae9
SP
3222 /* Is it a flush compl that has no data */
3223 if (unlikely(rxcp->num_rcvd == 0))
3224 goto loop_continue;
3225
3226 /* Discard compl with partial DMA Lancer B0 */
3227 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 3228 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
3229 goto loop_continue;
3230 }
3231
3232 /* On BE drop pkts that arrive due to imperfect filtering in
3233 * promiscuous mode on some skews
3234 */
3235 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 3236 !lancer_chip(adapter))) {
10ef9ab4 3237 be_rx_compl_discard(rxo, rxcp);
12004ae9 3238 goto loop_continue;
64642811 3239 }
009dd872 3240
fb6113e6 3241 if (do_gro(rxcp))
10ef9ab4 3242 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 3243 else
6384a4d0
SP
3244 be_rx_compl_process(rxo, napi, rxcp);
3245
12004ae9 3246loop_continue:
c30d7266 3247 frags_consumed += rxcp->num_rcvd;
2e588f84 3248 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
3249 }
3250
10ef9ab4
SP
3251 if (work_done) {
3252 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 3253
6384a4d0
SP
3254 /* When an rx-obj gets into post_starved state, just
3255 * let be_worker do the posting.
3256 */
3257 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3258 !rxo->rx_post_starved)
c30d7266
AK
3259 be_post_rx_frags(rxo, GFP_ATOMIC,
3260 max_t(u32, MAX_RX_POST,
3261 frags_consumed));
6b7c5b94 3262 }
10ef9ab4 3263
6b7c5b94
SP
3264 return work_done;
3265}
3266
512bb8a2 3267
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(adapter, txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo))
			netif_wake_subqueue(adapter->netdev, idx);

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

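/* NAPI poll handler, shared by all EQs: first reap TX completions for the
 * TX queues on this EQ, then run RX processing against the full budget on
 * each RX queue. If the budget isn't exhausted, the EQ is re-armed (with an
 * optional delay-multiplier encoding on Skyhawk); otherwise events are only
 * counted and cleared so polling continues.
 */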
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete_done(napi, max_work);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}

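/* Hardware error detection: on Lancer the SLIPORT status registers are
 * checked; on the other chips the unmasked bits of the UE (unrecoverable
 * error) low/high registers are decoded and logged. BE3 may raise spurious
 * UEs, so the POST stage is consulted before declaring a real error.
 */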
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	struct device *dev = &adapter->pdev->dev;
	u16 val;
	u32 i;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Reset is in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		if (ue_lo || ue_hi) {
			/* On certain platforms BE3 hardware can indicate
			 * spurious UEs. In case of a UE in the chip,
			 * the POST register correctly reports either a
			 * FAT_LOG_START state (FW is currently dumping
			 * FAT log data) or an ARMFW_UE state. Check for the
			 * above states to ascertain if the UE is valid or not.
			 */
			if (BE3_chip(adapter)) {
				val = be_POST_stage_get(adapter);
				if ((val & POST_STAGE_FAT_LOG_START)
				     != POST_STAGE_FAT_LOG_START &&
				    (val & POST_STAGE_ARMFW_UE)
				     != POST_STAGE_ARMFW_UE &&
				    (val & POST_STAGE_RECOVERABLE_ERR)
				     != POST_STAGE_RECOVERABLE_ERR)
					return;
			}

			dev_err(dev, "Error detected in the adapter\n");
			be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

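/* MSI-X vector budgeting: when RoCE is supported, enough vectors are
 * requested up front to cover both the NIC EQs and the RoCE EQs; when more
 * than MIN_MSIX_VECTORS are granted, half of them are set aside for RoCE.
 * VFs have no INTx fallback, so a failed allocation is fatal for them.
 */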
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		char irq_name[IFNAMSIZ + 4];

		snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, irq_name, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}

static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
		eth_zero_addr(adapter->dev_mac);
	}

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

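/* RX queue creation: a default (non-RSS) RXQ is created only when needed;
 * the rest are RSS queues. The RSS indirection table is filled round-robin
 * with the RSS queue ids (e.g. with 2 RSS queues: q0, q1, q0, q1, ...).
 */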
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}

static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Normally this condition is true as ->dev_mac is zeroed.
	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
	 * subsequent be_dev_mac_add() can fail (after fresh boot)
	 */
	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
		int old_pmac_id = -1;

		/* Remember old programmed MAC if any - can happen on BE3 VF */
		if (!is_zero_ether_addr(adapter->dev_mac))
			old_pmac_id = adapter->pmac_id[0];

		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;

		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
		 */
		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
			be_dev_mac_del(adapter, old_pmac_id);

		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac,
						vf_cfg->if_handle, vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_cancel_err_detection(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&err_rec->err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}

static int be_enable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct be_vxlan_port *vxlan_port;
	__be16 port;
	int status;

	vxlan_port = list_first_entry(&adapter->vxlan_port_list,
				      struct be_vxlan_port, list);
	port = vxlan_port->port;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		return status;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		return status;
	}
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return 0;
}

static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
}

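/* Worked example for the split below (illustrative numbers only): with
 * res.max_rss_qs = 16 and num_vfs = 3, each VF and the PF is offered
 * min(SH_VF_MAX_NIC_EQS, 16 / (3 + 1)) = min(SH_VF_MAX_NIC_EQS, 4) RSS
 * queues. TX queues and CQs are likewise divided by (num_vfs + 1); MACs,
 * VLANs, IFACEs and MCCQs only when FW reports the field as modifiable.
 */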
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and its VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than its PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}

static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}

static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */

static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * number of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}

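/* SRIOV config discovery: read the PF-pool limits from FW. If VFs survived a
 * previous driver unload, the pci-dev TotalVFs value is trusted instead of
 * the pool limits, and the existing VF count is adopted as num_vfs.
 */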
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}

static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
				res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
		min_t(u16, netif_get_num_default_rss_queues(),
		      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}

static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

		/* Initial MAC for BE3 VFs is already programmed by PF */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			memcpy(adapter->dev_mac, mac, ETH_ALEN);
	}

	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}

static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}

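/* Queue creation order matters: EQs come first since everything else binds
 * to an EQ, then TX queues, RX CQs and the MCC queues, followed by updating
 * the real queue counts exposed to the stack.
 */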
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	if (status)
		return status;

	return 0;
}

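/* Re-create the IF and queues (e.g. when the channel count changes): close
 * the device, tear everything down, optionally re-program the MSI-X table
 * (not possible if vectors are shared with RoCE), then rebuild and re-open.
 */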
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this promisc mode is not restored during
	 * be_open() because the driver thinks that it is
	 * already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}

/* If it is error recovery, FLR the PF
 * Else if any VFs are already enabled don't FLR the PF
 */
static bool be_reset_required(struct be_adapter *adapter)
{
	if (be_error_recovering(adapter))
		return true;
	else
		return pci_num_vf(adapter->pdev) == 0;
}

/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

66268739
IV
4910#ifdef CONFIG_NET_POLL_CONTROLLER
4911static void be_netpoll(struct net_device *netdev)
4912{
4913 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4914 struct be_eq_obj *eqo;
66268739
IV
4915 int i;
4916
e49cc34f 4917 for_all_evt_queues(adapter, eqo, i) {
20947770 4918 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4919 napi_schedule(&eqo->napi);
4920 }
66268739
IV
4921}
4922#endif
4923
485bf569
SN
4924int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4925{
4926 const struct firmware *fw;
4927 int status;
4928
4929 if (!netif_running(adapter->netdev)) {
4930 dev_err(&adapter->pdev->dev,
4931 "Firmware load not allowed (interface is down)\n");
940a3fcd 4932 return -ENETDOWN;
485bf569
SN
4933 }
4934
4935 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4936 if (status)
4937 goto fw_exit;
4938
4939 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4940
4941 if (lancer_chip(adapter))
4942 status = lancer_fw_download(adapter, fw);
4943 else
4944 status = be_fw_download(adapter, fw);
4945
eeb65ced 4946 if (!status)
e97e3cda 4947 be_cmd_get_fw_ver(adapter);
eeb65ced 4948
84517482
AK
4949fw_exit:
4950 release_firmware(fw);
4951 return status;
4952}
4953
add511b3
RP
4954static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4955 u16 flags)
a77dcb8c
AK
4956{
4957 struct be_adapter *adapter = netdev_priv(dev);
4958 struct nlattr *attr, *br_spec;
4959 int rem;
4960 int status = 0;
4961 u16 mode = 0;
4962
4963 if (!sriov_enabled(adapter))
4964 return -EOPNOTSUPP;
4965
4966 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4967 if (!br_spec)
4968 return -EINVAL;
a77dcb8c
AK
4969
4970 nla_for_each_nested(attr, br_spec, rem) {
4971 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4972 continue;
4973
b7c1a314
TG
4974 if (nla_len(attr) < sizeof(mode))
4975 return -EINVAL;
4976
a77dcb8c 4977 mode = nla_get_u16(attr);
ac0f5fba
SR
4978 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4979 return -EOPNOTSUPP;
4980
a77dcb8c
AK
4981 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4982 return -EINVAL;
4983
4984 status = be_cmd_set_hsw_config(adapter, 0, 0,
4985 adapter->if_handle,
4986 mode == BRIDGE_MODE_VEPA ?
4987 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4988 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4989 if (status)
4990 goto err;
4991
4992 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4993 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4994
4995 return status;
4996 }
4997err:
4998 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4999 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5000
5001 return status;
5002}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
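
/* Allocate a be_cmd_work item for deferring an adapter cmd to process
 * context. The caller fills in work->info and queues the item on be_wq;
 * the work handler is expected to free the item when done (see
 * be_cfg_vxlan_port() below for the typical usage pattern).
 */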
static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}
5051
630f4b70
SB
5052/* VxLAN offload Notes:
5053 *
5054 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5055 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5056 * is expected to work across all types of IP tunnels once exported. Skyhawk
5057 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
5058 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5059 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5060 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
5061 *
5062 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
bf8d9dfb
SB
5063 * adds more than one port, disable offloads and re-enable them again when
5064 * there's only one port left. We maintain a list of ports for this purpose.
630f4b70 5065 */
b7172414 5066static void be_work_add_vxlan_port(struct work_struct *work)
c9c47142 5067{
b7172414
SP
5068 struct be_cmd_work *cmd_work =
5069 container_of(work, struct be_cmd_work, work);
5070 struct be_adapter *adapter = cmd_work->adapter;
c9c47142 5071 struct device *dev = &adapter->pdev->dev;
b7172414 5072 __be16 port = cmd_work->info.vxlan_port;
bf8d9dfb 5073 struct be_vxlan_port *vxlan_port;
c9c47142
SP
5074 int status;
5075
bf8d9dfb
SB
5076 /* Bump up the alias count if it is an existing port */
5077 list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
5078 if (vxlan_port->port == port) {
5079 vxlan_port->port_aliases++;
5080 goto done;
5081 }
1e5b311a
JB
5082 }
5083
bf8d9dfb
SB
5084 /* Add a new port to our list. We don't need a lock here since port
5085 * add/delete are done only in the context of a single-threaded work
5086 * queue (be_wq).
5087 */
5088 vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
5089 if (!vxlan_port)
5090 goto done;
5091
5092 vxlan_port->port = port;
5093 INIT_LIST_HEAD(&vxlan_port->list);
5094 list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
5095 adapter->vxlan_port_count++;
5096
c9c47142 5097 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
5098 dev_info(dev,
5099 "Only one UDP port supported for VxLAN offloads\n");
630f4b70 5100 dev_info(dev, "Disabling VxLAN offloads\n");
630f4b70 5101 goto err;
c9c47142
SP
5102 }
5103
bf8d9dfb 5104 if (adapter->vxlan_port_count > 1)
b7172414 5105 goto done;
630f4b70 5106
bf8d9dfb
SB
5107 status = be_enable_vxlan_offloads(adapter);
5108 if (!status)
5109 goto done;
630f4b70 5110
c9c47142
SP
5111err:
5112 be_disable_vxlan_offloads(adapter);
b7172414
SP
5113done:
5114 kfree(cmd_work);
bf8d9dfb 5115 return;
c9c47142
SP
5116}

static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
		container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;

	/* Nothing to be done if a port alias is being deleted */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			if (vxlan_port->port_aliases) {
				vxlan_port->port_aliases--;
				goto done;
			}
			break;
		}
	}

	/* No port aliases left; delete the port from the list */
	list_del(&vxlan_port->list);
	adapter->vxlan_port_count--;

	/* Disable VxLAN offload if this is the offloaded port */
	if (adapter->vxlan_port == vxlan_port->port) {
		WARN_ON(adapter->vxlan_port_count);
		be_disable_vxlan_offloads(adapter);
		dev_info(&adapter->pdev->dev,
			 "Disabled VxLAN offloads for UDP port %d\n",
			 be16_to_cpu(port));
		goto out;
	}

	/* If only 1 port is left, re-enable VxLAN offload */
	if (adapter->vxlan_port_count == 1)
		be_enable_vxlan_offloads(adapter);

out:
	kfree(vxlan_port);
done:
	kfree(cmd_work);
}
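
/* Common helper for the udp_tunnel add/del entry points: bail out for
 * non-VxLAN tunnels and for chips without VxLAN offload support, else
 * defer the port add/delete to be_wq via the given work handler.
 */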
static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle a packet with an MSS less than 256,
		 * nor a TSO packet with a single segment. Disable GSO
		 * support in such cases.
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * to allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
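
/* Construct a physical port id from the HBA port number followed by the
 * controller serial number, copied word by word starting from the
 * highest-indexed word.
 */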
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
	.ndo_tx_timeout		= be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
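
/* Detach the netdev and tear down adapter resources; used by the
 * suspend, EEH error-handling and error-recovery paths before the
 * device is reset or re-initialized.
 */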
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
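
/* Counterpart of be_cleanup(): re-create adapter resources, reopen the
 * interface if it was running, and re-attach the netdev.
 */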
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
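
/* Initiate a chip soft reset by setting the SR bit in the SLIPORT
 * soft-reset register; used by the TPE recovery flow (PF0 only).
 */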
static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}
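
/* Recovery is attempted only if the POST stage reports a recoverable
 * error with a non-zero error code, enough time has elapsed since
 * driver load and since the last recovery, and the error code differs
 * from the previous one (i.e. not a consecutive TPE error).
 */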
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
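
/* TPE recovery state machine for BEx/Skyhawk. Each invocation advances
 * one state: NONE -> DETECT -> (RESET, on PF0 only) -> PRE_POLL ->
 * REINIT; resched_delay tells the caller when to schedule the next
 * step. Returns 0 once re-initialization may proceed, -EAGAIN while
 * recovery is still in progress, and a negative error otherwise.
 */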
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates a Chip Soft Reset. But PF0 must wait
		 * UE2SR milliseconds before it checks the final error status
		 * in SLIPORT_SEMAPHORE to determine if the recovery criteria
		 * are met. If they are, PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for the adapter to come out of error. Retry error recovery
		 * after this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}
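
/* Doorbell BAR number: BAR 0 on Lancer and on VFs, BAR 4 otherwise. */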
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	INIT_LIST_HEAD(&adapter->vxlan_port_list);
	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
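
/* Human-readable name of the multi-channel mode this function is
 * provisioned in; an empty string for a non-multi-channel setup.
 */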
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
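
/* PCI error-recovery (EEH/AER) callbacks: on a channel error the device
 * is cleaned up and disabled; the PCI error-recovery core then invokes
 * be_eeh_reset() and be_eeh_resume() (see be_eeh_handlers below) to
 * bring the adapter back up.
 */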
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);