be2net: support asymmetric rx/tx queue counts
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

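/* Helpers for the DMA-coherent ring memory backing a queue:
 * be_queue_alloc() zeroes the queue_info and allocates len * entry_size
 * bytes; be_queue_free() releases the memory and clears the kernel VA.
 */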
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

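/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit
 * of the MEMBAR control register through PCI config space. Used as a
 * fallback when the INTR_SET FW cmd fails (see be_intr_set() below).
 */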
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

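/* Doorbell helpers: each notify routine below composes a doorbell word
 * for the given queue and writes it to the adapter's doorbell BAR.
 * All of them skip the write if a HW error has been detected.
 */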
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

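/* The populate_be_vN_stats() routines copy the chip-specific HW stats
 * layout (v0 for BE2, v1 for BE3, v2 for later chips) into the common
 * struct be_drv_stats, so the rest of the driver reads a single layout.
 */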
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

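/* Accumulate a 16-bit HW counter into a 32-bit SW counter while
 * detecting wrap-around: e.g. if the low half of *acc is 65530 and the
 * HW now reports 5, the counter wrapped once, so 65536 is added.
 */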
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* The erx HW counter below can wrap around after 65535;
		 * the driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

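/* Pick the VLAN tag to put on the wire: if the priority bits in the
 * skb's tag are not in the adapter's allowed priority bitmap, they are
 * replaced with adapter->recommended_prio_bits; the VLAN ID is kept.
 */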
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

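/* Derive the per-packet WRB features (LSO, checksum offload, VLAN bits)
 * from the skb. For encapsulated (tunnel) packets the inner IP protocol
 * decides the TCP/UDP checksum feature bits.
 */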
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

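/* Software-insert VLAN tag(s) into the packet, for cases where HW
 * tagging must be skipped: the pvid is used when no tag is present
 * (QnQ mode), and an outer tag is added when qnq_vid is configured.
 * The VLAN_SKIP_HW feature bit is set so the FW skips HW insertion.
 */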
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

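/* Workarounds for BEx/Lancer TX quirks: trim padded short packets whose
 * IP tot_len/checksum the HW would mangle, and software-insert VLAN tags
 * where HW tagging is broken or could lock up the ASIC (certain ipv6
 * packets, pvid-tagged packets).
 */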
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

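/* Decide whether a TX packet must also be delivered to the on-board
 * BMC. If so, the caller enqueues the packet a second time with the
 * mgmt bit set. The filtering decisions above key off
 * adapter->bmc_filt_mask.
 */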
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan that are destined to the BMC,
	 * the asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	if (!test_bit(vid, adapter->vids))
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

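/* ndo_set_vf_mac handler: on BEx chips the VF's old pmac entry is
 * deleted and the new MAC added on the VF's interface; on other chips
 * the MAC is programmed directly with be_cmd_set_mac().
 */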
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

64600ea5 1624static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1625 struct ifla_vf_info *vi)
64600ea5
AK
1626{
1627 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1628 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1629
11ac75ed 1630 if (!sriov_enabled(adapter))
64600ea5
AK
1631 return -EPERM;
1632
11ac75ed 1633 if (vf >= adapter->num_vfs)
64600ea5
AK
1634 return -EINVAL;
1635
1636 vi->vf = vf;
ed616689
SC
1637 vi->max_tx_rate = vf_cfg->tx_rate;
1638 vi->min_tx_rate = 0;
a60b3a13
AK
1639 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1640 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1641 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1642 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1643 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1644
1645 return 0;
1646}
1647
435452aa
VV
1648static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1649{
1650 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1651 u16 vids[BE_NUM_VLANS_SUPPORTED];
1652 int vf_if_id = vf_cfg->if_handle;
1653 int status;
1654
1655 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1656 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1657 if (status)
1658 return status;
1659
 1660	/* If TVT is enabled, clear any pre-programmed VLAN filters on the VF */
1661 vids[0] = 0;
1662 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1663 if (!status)
1664 dev_info(&adapter->pdev->dev,
1665 "Cleared guest VLANs on VF%d", vf);
1666
1667 /* After TVT is enabled, disallow VFs to program VLAN filters */
1668 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1669 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1670 ~BE_PRIV_FILTMGMT, vf + 1);
1671 if (!status)
1672 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1673 }
1674 return 0;
1675}
1676
1677static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1678{
1679 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1680 struct device *dev = &adapter->pdev->dev;
1681 int status;
1682
1683 /* Reset Transparent VLAN Tagging. */
1684 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1685 vf_cfg->if_handle, 0, 0);
435452aa
VV
1686 if (status)
1687 return status;
1688
1689 /* Allow VFs to program VLAN filtering */
1690 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1691 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1692 BE_PRIV_FILTMGMT, vf + 1);
1693 if (!status) {
1694 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1695 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1696 }
1697 }
1698
1699 dev_info(dev,
1700 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1701 return 0;
1702}
1703
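/* .ndo_set_vf_vlan hook; a typical (illustrative) userspace trigger is
 *	ip link set <pf-ifname> vf 0 vlan 100 qos 3
 * which arrives here as vlan=100, qos=3 and enables TVT on that VF.
 */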
748b539a 1704static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1705{
1706 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1707 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1708 int status;
1da87b7f 1709
11ac75ed 1710 if (!sriov_enabled(adapter))
1da87b7f
AK
1711 return -EPERM;
1712
b9fc0e53 1713 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1714 return -EINVAL;
1715
b9fc0e53
AK
1716 if (vlan || qos) {
1717 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1718 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1719 } else {
435452aa 1720 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1721 }
1722
abccf23e
KA
1723 if (status) {
1724 dev_err(&adapter->pdev->dev,
435452aa
VV
1725 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1726 status);
abccf23e
KA
1727 return be_cmd_status(status);
1728 }
1729
1730 vf_cfg->vlan_tag = vlan;
abccf23e 1731 return 0;
1da87b7f
AK
1732}
1733
ed616689
SC
1734static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1735 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1736{
1737 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1738 struct device *dev = &adapter->pdev->dev;
1739 int percent_rate, status = 0;
1740 u16 link_speed = 0;
1741 u8 link_status;
e1d18735 1742
11ac75ed 1743 if (!sriov_enabled(adapter))
e1d18735
AK
1744 return -EPERM;
1745
94f434c2 1746 if (vf >= adapter->num_vfs)
e1d18735
AK
1747 return -EINVAL;
1748
ed616689
SC
1749 if (min_tx_rate)
1750 return -EINVAL;
1751
0f77ba73
RN
1752 if (!max_tx_rate)
1753 goto config_qos;
1754
1755 status = be_cmd_link_status_query(adapter, &link_speed,
1756 &link_status, 0);
1757 if (status)
1758 goto err;
1759
1760 if (!link_status) {
1761 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1762 status = -ENETDOWN;
0f77ba73
RN
1763 goto err;
1764 }
1765
1766 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1767 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1768 link_speed);
1769 status = -EINVAL;
1770 goto err;
1771 }
1772
 1773	/* On Skyhawk the QoS setting must be specified as a % of the link speed */
1774 percent_rate = link_speed / 100;
1775 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1776 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1777 percent_rate);
1778 status = -EINVAL;
1779 goto err;
94f434c2 1780 }
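	/* Worked example (illustrative): at a 10Gbps link, link_speed is
	 * 10000, so percent_rate = 100 and, on Skyhawk, max_tx_rate must be
	 * a multiple of 100 Mbps, i.e. a whole percent of the link speed.
	 */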
e1d18735 1781
0f77ba73
RN
1782config_qos:
1783 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1784 if (status)
0f77ba73
RN
1785 goto err;
1786
1787 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1788 return 0;
1789
1790err:
1791 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1792 max_tx_rate, vf);
abccf23e 1793 return be_cmd_status(status);
e1d18735 1794}
e2fb1afa 1795
bdce2ad7
SR
1796static int be_set_vf_link_state(struct net_device *netdev, int vf,
1797 int link_state)
1798{
1799 struct be_adapter *adapter = netdev_priv(netdev);
1800 int status;
1801
1802 if (!sriov_enabled(adapter))
1803 return -EPERM;
1804
1805 if (vf >= adapter->num_vfs)
1806 return -EINVAL;
1807
1808 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1809 if (status) {
1810 dev_err(&adapter->pdev->dev,
1811 "Link state change on VF %d failed: %#x\n", vf, status);
1812 return be_cmd_status(status);
1813 }
bdce2ad7 1814
abccf23e
KA
1815 adapter->vf_cfg[vf].plink_tracking = link_state;
1816
1817 return 0;
bdce2ad7 1818}
e1d18735 1819
e7bcbd7b
KA
1820static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1821{
1822 struct be_adapter *adapter = netdev_priv(netdev);
1823 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1824 u8 spoofchk;
1825 int status;
1826
1827 if (!sriov_enabled(adapter))
1828 return -EPERM;
1829
1830 if (vf >= adapter->num_vfs)
1831 return -EINVAL;
1832
1833 if (BEx_chip(adapter))
1834 return -EOPNOTSUPP;
1835
1836 if (enable == vf_cfg->spoofchk)
1837 return 0;
1838
1839 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1840
1841 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1842 0, spoofchk);
1843 if (status) {
1844 dev_err(&adapter->pdev->dev,
1845 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1846 return be_cmd_status(status);
1847 }
1848
1849 vf_cfg->spoofchk = enable;
1850 return 0;
1851}
1852
2632bafd
SP
1853static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1854 ulong now)
6b7c5b94 1855{
2632bafd
SP
1856 aic->rx_pkts_prev = rx_pkts;
1857 aic->tx_reqs_prev = tx_pkts;
1858 aic->jiffies = now;
1859}
ac124ff9 1860
20947770 1861static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1862{
20947770
PR
1863 struct be_adapter *adapter = eqo->adapter;
1864 int eqd, start;
2632bafd 1865 struct be_aic_obj *aic;
2632bafd
SP
1866 struct be_rx_obj *rxo;
1867 struct be_tx_obj *txo;
20947770 1868 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
1869 ulong now;
1870 u32 pps, delta;
20947770 1871 int i;
10ef9ab4 1872
20947770
PR
1873 aic = &adapter->aic_obj[eqo->idx];
1874 if (!aic->enable) {
1875 if (aic->jiffies)
1876 aic->jiffies = 0;
1877 eqd = aic->et_eqd;
1878 return eqd;
1879 }
6b7c5b94 1880
20947770 1881 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1882 do {
57a7744e 1883 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1884 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1885 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1886 }
10ef9ab4 1887
20947770 1888 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1889 do {
57a7744e 1890 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1891 tx_pkts += txo->stats.tx_reqs;
57a7744e 1892 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1893 }
6b7c5b94 1894
20947770
PR
 1895	/* Skip if the counters wrapped around or this is the first calculation */
1896 now = jiffies;
1897 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1898 rx_pkts < aic->rx_pkts_prev ||
1899 tx_pkts < aic->tx_reqs_prev) {
1900 be_aic_update(aic, rx_pkts, tx_pkts, now);
1901 return aic->prev_eqd;
1902 }
2632bafd 1903
20947770
PR
1904 delta = jiffies_to_msecs(now - aic->jiffies);
1905 if (delta == 0)
1906 return aic->prev_eqd;
10ef9ab4 1907
20947770
PR
1908 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1909 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1910 eqd = (pps / 15000) << 2;
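	/* Worked example (illustrative numbers): with delta = 1000ms and
	 * 3,000,000 rx pkts plus 750,000 tx reqs seen in that window,
	 * pps = 3,750,000 and eqd = (3750000 / 15000) << 2 = 1000, before
	 * the clamping to [aic->min_eqd, aic->max_eqd] below.
	 */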
2632bafd 1911
20947770
PR
1912 if (eqd < 8)
1913 eqd = 0;
1914 eqd = min_t(u32, eqd, aic->max_eqd);
1915 eqd = max_t(u32, eqd, aic->min_eqd);
1916
1917 be_aic_update(aic, rx_pkts, tx_pkts, now);
1918
1919 return eqd;
1920}
1921
1922/* For Skyhawk-R only */
1923static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1924{
1925 struct be_adapter *adapter = eqo->adapter;
1926 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1927 ulong now = jiffies;
1928 int eqd;
1929 u32 mult_enc;
1930
1931 if (!aic->enable)
1932 return 0;
1933
3c0d49aa 1934 if (jiffies_to_msecs(now - aic->jiffies) < 1)
20947770
PR
1935 eqd = aic->prev_eqd;
1936 else
1937 eqd = be_get_new_eqd(eqo);
1938
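	/* Map the computed delay onto one of four coarse R2I delay-multiplier
	 * encodings understood by the Skyhawk EQ doorbell; the actual delay
	 * behind each encoding is hardware-defined.
	 */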
1939 if (eqd > 100)
1940 mult_enc = R2I_DLY_ENC_1;
1941 else if (eqd > 60)
1942 mult_enc = R2I_DLY_ENC_2;
1943 else if (eqd > 20)
1944 mult_enc = R2I_DLY_ENC_3;
1945 else
1946 mult_enc = R2I_DLY_ENC_0;
1947
1948 aic->prev_eqd = eqd;
1949
1950 return mult_enc;
1951}
1952
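/* Push updated EQ delay values to the FW. The (eqd * 65)/100 scaling below
 * converts the computed delay into the delay-multiplier encoding consumed
 * by be_cmd_modify_eqd() (an inference from this code; the unit conversion
 * is not documented here). Only EQs whose delay changed are sent, unless
 * force_update is set.
 */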
1953void be_eqd_update(struct be_adapter *adapter, bool force_update)
1954{
1955 struct be_set_eqd set_eqd[MAX_EVT_QS];
1956 struct be_aic_obj *aic;
1957 struct be_eq_obj *eqo;
1958 int i, num = 0, eqd;
1959
1960 for_all_evt_queues(adapter, eqo, i) {
1961 aic = &adapter->aic_obj[eqo->idx];
1962 eqd = be_get_new_eqd(eqo);
1963 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
1964 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1965 set_eqd[num].eq_id = eqo->q.id;
1966 aic->prev_eqd = eqd;
1967 num++;
1968 }
ac124ff9 1969 }
2632bafd
SP
1970
1971 if (num)
1972 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1973}
1974
3abcdeda 1975static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1976 struct be_rx_compl_info *rxcp)
4097f663 1977{
ac124ff9 1978 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1979
ab1594e9 1980 u64_stats_update_begin(&stats->sync);
3abcdeda 1981 stats->rx_compl++;
2e588f84 1982 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1983 stats->rx_pkts++;
8670f2a5
SB
1984 if (rxcp->tunneled)
1985 stats->rx_vxlan_offload_pkts++;
2e588f84 1986 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1987 stats->rx_mcast_pkts++;
2e588f84 1988 if (rxcp->err)
ac124ff9 1989 stats->rx_compl_err++;
ab1594e9 1990 u64_stats_update_end(&stats->sync);
4097f663
SP
1991}
1992
2e588f84 1993static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1994{
19fad86f 1995 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1996 * Also ignore ipcksm for ipv6 pkts
1997 */
2e588f84 1998 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1999 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
2000}
2001
0b0ef1d0 2002static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 2003{
10ef9ab4 2004 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2005 struct be_rx_page_info *rx_page_info;
3abcdeda 2006 struct be_queue_info *rxq = &rxo->q;
b0fd2eb2 2007 u32 frag_idx = rxq->tail;
6b7c5b94 2008
3abcdeda 2009 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
2010 BUG_ON(!rx_page_info->page);
2011
e50287be 2012 if (rx_page_info->last_frag) {
2b7bcebf
IV
2013 dma_unmap_page(&adapter->pdev->dev,
2014 dma_unmap_addr(rx_page_info, bus),
2015 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2016 rx_page_info->last_frag = false;
2017 } else {
2018 dma_sync_single_for_cpu(&adapter->pdev->dev,
2019 dma_unmap_addr(rx_page_info, bus),
2020 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2021 }
6b7c5b94 2022
0b0ef1d0 2023 queue_tail_inc(rxq);
6b7c5b94
SP
2024 atomic_dec(&rxq->used);
2025 return rx_page_info;
2026}
2027
 2028/* Throw away the data in the Rx completion */
10ef9ab4
SP
2029static void be_rx_compl_discard(struct be_rx_obj *rxo,
2030 struct be_rx_compl_info *rxcp)
6b7c5b94 2031{
6b7c5b94 2032 struct be_rx_page_info *page_info;
2e588f84 2033 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2034
e80d9da6 2035 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2036 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2037 put_page(page_info->page);
2038 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2039 }
2040}
2041
2042/*
2043 * skb_fill_rx_data forms a complete skb for an ether frame
2044 * indicated by rxcp.
2045 */
10ef9ab4
SP
2046static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2047 struct be_rx_compl_info *rxcp)
6b7c5b94 2048{
6b7c5b94 2049 struct be_rx_page_info *page_info;
2e588f84
SP
2050 u16 i, j;
2051 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2052 u8 *start;
6b7c5b94 2053
0b0ef1d0 2054 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2055 start = page_address(page_info->page) + page_info->page_offset;
2056 prefetch(start);
2057
2058 /* Copy data in the first descriptor of this completion */
2e588f84 2059 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2060
6b7c5b94
SP
2061 skb->len = curr_frag_len;
2062 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2063 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2064 /* Complete packet has now been moved to data */
2065 put_page(page_info->page);
2066 skb->data_len = 0;
2067 skb->tail += curr_frag_len;
2068 } else {
ac1ae5f3
ED
2069 hdr_len = ETH_HLEN;
2070 memcpy(skb->data, start, hdr_len);
6b7c5b94 2071 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2072 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2073 skb_shinfo(skb)->frags[0].page_offset =
2074 page_info->page_offset + hdr_len;
748b539a
SP
2075 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2076 curr_frag_len - hdr_len);
6b7c5b94 2077 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2078 skb->truesize += rx_frag_size;
6b7c5b94
SP
2079 skb->tail += hdr_len;
2080 }
205859a2 2081 page_info->page = NULL;
6b7c5b94 2082
2e588f84
SP
2083 if (rxcp->pkt_size <= rx_frag_size) {
2084 BUG_ON(rxcp->num_rcvd != 1);
2085 return;
6b7c5b94
SP
2086 }
2087
2088 /* More frags present for this completion */
2e588f84
SP
2089 remaining = rxcp->pkt_size - curr_frag_len;
2090 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2091 page_info = get_rx_page_info(rxo);
2e588f84 2092 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2093
bd46cb6c
AK
2094 /* Coalesce all frags from the same physical page in one slot */
2095 if (page_info->page_offset == 0) {
2096 /* Fresh page */
2097 j++;
b061b39e 2098 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2099 skb_shinfo(skb)->frags[j].page_offset =
2100 page_info->page_offset;
9e903e08 2101 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2102 skb_shinfo(skb)->nr_frags++;
2103 } else {
2104 put_page(page_info->page);
2105 }
2106
9e903e08 2107 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2108 skb->len += curr_frag_len;
2109 skb->data_len += curr_frag_len;
bdb28a97 2110 skb->truesize += rx_frag_size;
2e588f84 2111 remaining -= curr_frag_len;
205859a2 2112 page_info->page = NULL;
6b7c5b94 2113 }
bd46cb6c 2114 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2115}
2116
5be93b9a 2117/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2118static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2119 struct be_rx_compl_info *rxcp)
6b7c5b94 2120{
10ef9ab4 2121 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2122 struct net_device *netdev = adapter->netdev;
6b7c5b94 2123 struct sk_buff *skb;
89420424 2124
bb349bb4 2125 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2126 if (unlikely(!skb)) {
ac124ff9 2127 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2128 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2129 return;
2130 }
2131
10ef9ab4 2132 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2133
6332c8d3 2134 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2135 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2136 else
2137 skb_checksum_none_assert(skb);
6b7c5b94 2138
6332c8d3 2139 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2140 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2141 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2142 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2143
b6c0e89d 2144 skb->csum_level = rxcp->tunneled;
6384a4d0 2145 skb_mark_napi_id(skb, napi);
6b7c5b94 2146
343e43c0 2147 if (rxcp->vlanf)
86a9bad3 2148 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2149
2150 netif_receive_skb(skb);
6b7c5b94
SP
2151}
2152
5be93b9a 2153/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2154static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2155 struct napi_struct *napi,
2156 struct be_rx_compl_info *rxcp)
6b7c5b94 2157{
10ef9ab4 2158 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2159 struct be_rx_page_info *page_info;
5be93b9a 2160 struct sk_buff *skb = NULL;
2e588f84
SP
2161 u16 remaining, curr_frag_len;
2162 u16 i, j;
3968fa1e 2163
10ef9ab4 2164 skb = napi_get_frags(napi);
5be93b9a 2165 if (!skb) {
10ef9ab4 2166 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2167 return;
2168 }
2169
2e588f84
SP
2170 remaining = rxcp->pkt_size;
2171 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2172 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2173
2174 curr_frag_len = min(remaining, rx_frag_size);
2175
bd46cb6c
AK
2176 /* Coalesce all frags from the same physical page in one slot */
2177 if (i == 0 || page_info->page_offset == 0) {
2178 /* First frag or Fresh page */
2179 j++;
b061b39e 2180 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2181 skb_shinfo(skb)->frags[j].page_offset =
2182 page_info->page_offset;
9e903e08 2183 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2184 } else {
2185 put_page(page_info->page);
2186 }
9e903e08 2187 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2188 skb->truesize += rx_frag_size;
bd46cb6c 2189 remaining -= curr_frag_len;
6b7c5b94
SP
2190 memset(page_info, 0, sizeof(*page_info));
2191 }
bd46cb6c 2192 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2193
5be93b9a 2194 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2195 skb->len = rxcp->pkt_size;
2196 skb->data_len = rxcp->pkt_size;
5be93b9a 2197 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2198 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2199 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2200 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2201
b6c0e89d 2202 skb->csum_level = rxcp->tunneled;
5be93b9a 2203
343e43c0 2204 if (rxcp->vlanf)
86a9bad3 2205 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2206
10ef9ab4 2207 napi_gro_frags(napi);
2e588f84
SP
2208}
2209
10ef9ab4
SP
2210static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2211 struct be_rx_compl_info *rxcp)
2e588f84 2212{
c3c18bc1
SP
2213 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2214 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2215 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2216 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2217 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2218 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2219 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2220 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2221 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2222 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2223 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2224 if (rxcp->vlanf) {
c3c18bc1
SP
2225 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2226 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2227 }
c3c18bc1 2228 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2229 rxcp->tunneled =
c3c18bc1 2230 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2231}
2232
10ef9ab4
SP
2233static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2234 struct be_rx_compl_info *rxcp)
2e588f84 2235{
c3c18bc1
SP
2236 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2237 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2238 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2239 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2240 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2241 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2242 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2243 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2244 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2245 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2246 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2247 if (rxcp->vlanf) {
c3c18bc1
SP
2248 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2249 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2250 }
c3c18bc1
SP
2251 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2252 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2253}
2254
2255static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2256{
2257 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2258 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2259 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2260
2e588f84
SP
 2261	/* For checking the valid bit, it is OK to use either definition, as the
 2262	 * valid bit is at the same position in both v0 and v1 Rx compls */
2263 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2264 return NULL;
6b7c5b94 2265
2e588f84
SP
2266 rmb();
2267 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2268
2e588f84 2269 if (adapter->be3_native)
10ef9ab4 2270 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2271 else
10ef9ab4 2272 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2273
e38b1706
SK
2274 if (rxcp->ip_frag)
2275 rxcp->l4_csum = 0;
2276
15d72184 2277 if (rxcp->vlanf) {
f93f160b
VV
2278 /* In QNQ modes, if qnq bit is not set, then the packet was
2279 * tagged only with the transparent outer vlan-tag and must
2280 * not be treated as a vlan packet by host
2281 */
2282 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2283 rxcp->vlanf = 0;
6b7c5b94 2284
15d72184 2285 if (!lancer_chip(adapter))
3c709f8f 2286 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2287
939cf306 2288 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2289 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2290 rxcp->vlanf = 0;
2291 }
2e588f84
SP
2292
 2293	/* As the compl has been parsed, reset it; we won't touch it again */
2294 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2295
3abcdeda 2296 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2297 return rxcp;
2298}
2299
1829b086 2300static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2301{
6b7c5b94 2302 u32 order = get_order(size);
1829b086 2303
6b7c5b94 2304 if (order > 0)
1829b086
ED
2305 gfp |= __GFP_COMP;
2306 return alloc_pages(gfp, order);
6b7c5b94
SP
2307}
2308
2309/*
 2310 * Allocate a page, split it into fragments of size rx_frag_size and post as
2311 * receive buffers to BE
2312 */
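/* Worked example (assuming 4K pages): with the default rx_frag_size of
 * 2048, big_page_size is 4096, so each page is mapped once and split into
 * two fragments; page_offset steps from 0 to 2048 and the page is unmapped
 * only when its last fragment is consumed (see the last_frag handling).
 */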
c30d7266 2313static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2314{
3abcdeda 2315 struct be_adapter *adapter = rxo->adapter;
26d92f92 2316 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2317 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2318 struct page *pagep = NULL;
ba42fad0 2319 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2320 struct be_eth_rx_d *rxd;
2321 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2322 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2323
3abcdeda 2324 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2325 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2326 if (!pagep) {
1829b086 2327 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2328 if (unlikely(!pagep)) {
ac124ff9 2329 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2330 break;
2331 }
ba42fad0
IV
2332 page_dmaaddr = dma_map_page(dev, pagep, 0,
2333 adapter->big_page_size,
2b7bcebf 2334 DMA_FROM_DEVICE);
ba42fad0
IV
2335 if (dma_mapping_error(dev, page_dmaaddr)) {
2336 put_page(pagep);
2337 pagep = NULL;
d3de1540 2338 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2339 break;
2340 }
e50287be 2341 page_offset = 0;
6b7c5b94
SP
2342 } else {
2343 get_page(pagep);
e50287be 2344 page_offset += rx_frag_size;
6b7c5b94 2345 }
e50287be 2346 page_info->page_offset = page_offset;
6b7c5b94 2347 page_info->page = pagep;
6b7c5b94
SP
2348
2349 rxd = queue_head_node(rxq);
e50287be 2350 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2351 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2352 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2353
2354 /* Any space left in the current big page for another frag? */
2355 if ((page_offset + rx_frag_size + rx_frag_size) >
2356 adapter->big_page_size) {
2357 pagep = NULL;
e50287be
SP
2358 page_info->last_frag = true;
2359 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2360 } else {
2361 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2362 }
26d92f92
SP
2363
2364 prev_page_info = page_info;
2365 queue_head_inc(rxq);
10ef9ab4 2366 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2367 }
e50287be
SP
2368
2369 /* Mark the last frag of a page when we break out of the above loop
2370 * with no more slots available in the RXQ
2371 */
2372 if (pagep) {
2373 prev_page_info->last_frag = true;
2374 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2375 }
6b7c5b94
SP
2376
2377 if (posted) {
6b7c5b94 2378 atomic_add(posted, &rxq->used);
6384a4d0
SP
2379 if (rxo->rx_post_starved)
2380 rxo->rx_post_starved = false;
c30d7266 2381 do {
69304cc9 2382 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2383 be_rxq_notify(adapter, rxq->id, notify);
2384 posted -= notify;
2385 } while (posted);
ea1dae11
SP
2386 } else if (atomic_read(&rxq->used) == 0) {
2387 /* Let be_worker replenish when memory is available */
3abcdeda 2388 rxo->rx_post_starved = true;
6b7c5b94 2389 }
6b7c5b94
SP
2390}
2391
152ffe5b 2392static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2393{
152ffe5b
SB
2394 struct be_queue_info *tx_cq = &txo->cq;
2395 struct be_tx_compl_info *txcp = &txo->txcp;
2396 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2397
152ffe5b 2398 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2399 return NULL;
2400
152ffe5b 2401 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2402 rmb();
152ffe5b 2403 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2404
152ffe5b
SB
2405 txcp->status = GET_TX_COMPL_BITS(status, compl);
2406 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2407
152ffe5b 2408 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2409 queue_tail_inc(tx_cq);
2410 return txcp;
2411}
2412
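/* Unmap and free the wrbs of completed TX request(s) up to last_index.
 * A request's skb is stored only at its header-wrb slot in sent_skbs[],
 * so a non-NULL slot marks the start of a request; the header wrb is
 * skipped and every subsequent wrb is unmapped until last_index.
 */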
3c8def97 2413static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2414 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2415{
5f07b3c5 2416 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2417 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2418 struct sk_buff *skb = NULL;
2419 bool unmap_skb_hdr = false;
a73b796e 2420 struct be_eth_wrb *wrb;
b0fd2eb2 2421 u16 num_wrbs = 0;
2422 u32 frag_index;
6b7c5b94 2423
ec43b1a6 2424 do {
5f07b3c5
SP
2425 if (sent_skbs[txq->tail]) {
2426 /* Free skb from prev req */
2427 if (skb)
2428 dev_consume_skb_any(skb);
2429 skb = sent_skbs[txq->tail];
2430 sent_skbs[txq->tail] = NULL;
2431 queue_tail_inc(txq); /* skip hdr wrb */
2432 num_wrbs++;
2433 unmap_skb_hdr = true;
2434 }
a73b796e 2435 wrb = queue_tail_node(txq);
5f07b3c5 2436 frag_index = txq->tail;
2b7bcebf 2437 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2438 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2439 unmap_skb_hdr = false;
6b7c5b94 2440 queue_tail_inc(txq);
5f07b3c5
SP
2441 num_wrbs++;
2442 } while (frag_index != last_index);
2443 dev_consume_skb_any(skb);
6b7c5b94 2444
4d586b82 2445 return num_wrbs;
6b7c5b94
SP
2446}
2447
10ef9ab4
SP
2448/* Return the number of events in the event queue */
2449static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2450{
10ef9ab4
SP
2451 struct be_eq_entry *eqe;
2452 int num = 0;
859b1e4e 2453
10ef9ab4
SP
2454 do {
2455 eqe = queue_tail_node(&eqo->q);
2456 if (eqe->evt == 0)
2457 break;
859b1e4e 2458
10ef9ab4
SP
2459 rmb();
2460 eqe->evt = 0;
2461 num++;
2462 queue_tail_inc(&eqo->q);
2463 } while (true);
2464
2465 return num;
859b1e4e
SP
2466}
2467
10ef9ab4
SP
 2468/* Leaves the EQ in disarmed state */
2469static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2470{
10ef9ab4 2471 int num = events_get(eqo);
859b1e4e 2472
20947770 2473 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2474}
2475
99b44304
KA
2476/* Free posted rx buffers that were not used */
2477static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2478{
3abcdeda 2479 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2480 struct be_rx_page_info *page_info;
2481
2482 while (atomic_read(&rxq->used) > 0) {
2483 page_info = get_rx_page_info(rxo);
2484 put_page(page_info->page);
2485 memset(page_info, 0, sizeof(*page_info));
2486 }
2487 BUG_ON(atomic_read(&rxq->used));
2488 rxq->tail = 0;
2489 rxq->head = 0;
2490}
2491
2492static void be_rx_cq_clean(struct be_rx_obj *rxo)
2493{
3abcdeda 2494 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2495 struct be_rx_compl_info *rxcp;
d23e946c
SP
2496 struct be_adapter *adapter = rxo->adapter;
2497 int flush_wait = 0;
6b7c5b94 2498
d23e946c
SP
2499 /* Consume pending rx completions.
2500 * Wait for the flush completion (identified by zero num_rcvd)
2501 * to arrive. Notify CQ even when there are no more CQ entries
2502 * for HW to flush partially coalesced CQ entries.
2503 * In Lancer, there is no need to wait for flush compl.
2504 */
2505 for (;;) {
2506 rxcp = be_rx_compl_get(rxo);
ddf1169f 2507 if (!rxcp) {
d23e946c
SP
2508 if (lancer_chip(adapter))
2509 break;
2510
954f6825
VD
2511 if (flush_wait++ > 50 ||
2512 be_check_error(adapter,
2513 BE_ERROR_HW)) {
d23e946c
SP
2514 dev_warn(&adapter->pdev->dev,
2515 "did not receive flush compl\n");
2516 break;
2517 }
2518 be_cq_notify(adapter, rx_cq->id, true, 0);
2519 mdelay(1);
2520 } else {
2521 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2522 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2523 if (rxcp->num_rcvd == 0)
2524 break;
2525 }
6b7c5b94
SP
2526 }
2527
d23e946c
SP
2528 /* After cleanup, leave the CQ in unarmed state */
2529 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2530}
2531
0ae57bb3 2532static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2533{
5f07b3c5 2534 struct device *dev = &adapter->pdev->dev;
b0fd2eb2 2535 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
152ffe5b 2536 struct be_tx_compl_info *txcp;
0ae57bb3 2537 struct be_queue_info *txq;
b0fd2eb2 2538 u32 end_idx, notified_idx;
152ffe5b 2539 struct be_tx_obj *txo;
0ae57bb3 2540 int i, pending_txqs;
a8e9179a 2541
1a3d0717 2542 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2543 do {
0ae57bb3
SP
2544 pending_txqs = adapter->num_tx_qs;
2545
2546 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2547 cmpl = 0;
2548 num_wrbs = 0;
0ae57bb3 2549 txq = &txo->q;
152ffe5b
SB
2550 while ((txcp = be_tx_compl_get(txo))) {
2551 num_wrbs +=
2552 be_tx_compl_process(adapter, txo,
2553 txcp->end_index);
0ae57bb3
SP
2554 cmpl++;
2555 }
2556 if (cmpl) {
2557 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2558 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2559 timeo = 0;
0ae57bb3 2560 }
cf5671e6 2561 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2562 pending_txqs--;
a8e9179a
SP
2563 }
2564
954f6825
VD
2565 if (pending_txqs == 0 || ++timeo > 10 ||
2566 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2567 break;
2568
2569 mdelay(1);
2570 } while (true);
2571
5f07b3c5 2572 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2573 for_all_tx_queues(adapter, txo, i) {
2574 txq = &txo->q;
0ae57bb3 2575
5f07b3c5
SP
2576 if (atomic_read(&txq->used)) {
2577 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2578 i, atomic_read(&txq->used));
2579 notified_idx = txq->tail;
0ae57bb3 2580 end_idx = txq->tail;
5f07b3c5
SP
2581 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2582 txq->len);
2583 /* Use the tx-compl process logic to handle requests
2584 * that were not sent to the HW.
2585 */
0ae57bb3
SP
2586 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2587 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2588 BUG_ON(atomic_read(&txq->used));
2589 txo->pend_wrb_cnt = 0;
2590 /* Since hw was never notified of these requests,
2591 * reset TXQ indices
2592 */
2593 txq->head = notified_idx;
2594 txq->tail = notified_idx;
0ae57bb3 2595 }
b03388d6 2596 }
6b7c5b94
SP
2597}
2598
10ef9ab4
SP
2599static void be_evt_queues_destroy(struct be_adapter *adapter)
2600{
2601 struct be_eq_obj *eqo;
2602 int i;
2603
2604 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2605 if (eqo->q.created) {
2606 be_eq_clean(eqo);
10ef9ab4 2607 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2608 napi_hash_del(&eqo->napi);
68d7bdcb 2609 netif_napi_del(&eqo->napi);
649886a3 2610 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2611 }
10ef9ab4
SP
2612 be_queue_free(adapter, &eqo->q);
2613 }
2614}
2615
2616static int be_evt_queues_create(struct be_adapter *adapter)
2617{
2618 struct be_queue_info *eq;
2619 struct be_eq_obj *eqo;
2632bafd 2620 struct be_aic_obj *aic;
10ef9ab4
SP
2621 int i, rc;
2622
e261768e 2623 /* need enough EQs to service both RX and TX queues */
92bf14ab 2624 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
e261768e
SP
2625 max(adapter->cfg_num_rx_irqs,
2626 adapter->cfg_num_tx_irqs));
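	/* Example (hypothetical config): with cfg_num_rx_irqs = 8 and
	 * cfg_num_tx_irqs = 4, up to 8 EQs are created here; RSS rings are
	 * then spread across all 8 while be_tx_qs_create() caps the TX
	 * queues at 4, giving the asymmetric RX/TX queue counts.
	 */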
10ef9ab4
SP
2627
2628 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2629 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2630
2632bafd 2631 aic = &adapter->aic_obj[i];
10ef9ab4 2632 eqo->adapter = adapter;
10ef9ab4 2633 eqo->idx = i;
2632bafd
SP
2634 aic->max_eqd = BE_MAX_EQD;
2635 aic->enable = true;
10ef9ab4
SP
2636
2637 eq = &eqo->q;
2638 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2639 sizeof(struct be_eq_entry));
10ef9ab4
SP
2640 if (rc)
2641 return rc;
2642
f2f781a7 2643 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2644 if (rc)
2645 return rc;
649886a3
KA
2646
2647 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2648 return -ENOMEM;
2649 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2650 eqo->affinity_mask);
2651 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2652 BE_NAPI_WEIGHT);
10ef9ab4 2653 }
1cfafab9 2654 return 0;
10ef9ab4
SP
2655}
2656
5fb379ee
SP
2657static void be_mcc_queues_destroy(struct be_adapter *adapter)
2658{
2659 struct be_queue_info *q;
5fb379ee 2660
8788fdc2 2661 q = &adapter->mcc_obj.q;
5fb379ee 2662 if (q->created)
8788fdc2 2663 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2664 be_queue_free(adapter, q);
2665
8788fdc2 2666 q = &adapter->mcc_obj.cq;
5fb379ee 2667 if (q->created)
8788fdc2 2668 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2669 be_queue_free(adapter, q);
2670}
2671
2672/* Must be called only after TX qs are created as MCC shares TX EQ */
2673static int be_mcc_queues_create(struct be_adapter *adapter)
2674{
2675 struct be_queue_info *q, *cq;
5fb379ee 2676
8788fdc2 2677 cq = &adapter->mcc_obj.cq;
5fb379ee 2678 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2679 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2680 goto err;
2681
10ef9ab4
SP
2682 /* Use the default EQ for MCC completions */
2683 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2684 goto mcc_cq_free;
2685
8788fdc2 2686 q = &adapter->mcc_obj.q;
5fb379ee
SP
2687 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2688 goto mcc_cq_destroy;
2689
8788fdc2 2690 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2691 goto mcc_q_free;
2692
2693 return 0;
2694
2695mcc_q_free:
2696 be_queue_free(adapter, q);
2697mcc_cq_destroy:
8788fdc2 2698 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2699mcc_cq_free:
2700 be_queue_free(adapter, cq);
2701err:
2702 return -1;
2703}
2704
6b7c5b94
SP
2705static void be_tx_queues_destroy(struct be_adapter *adapter)
2706{
2707 struct be_queue_info *q;
3c8def97
SP
2708 struct be_tx_obj *txo;
2709 u8 i;
6b7c5b94 2710
3c8def97
SP
2711 for_all_tx_queues(adapter, txo, i) {
2712 q = &txo->q;
2713 if (q->created)
2714 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2715 be_queue_free(adapter, q);
6b7c5b94 2716
3c8def97
SP
2717 q = &txo->cq;
2718 if (q->created)
2719 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2720 be_queue_free(adapter, q);
2721 }
6b7c5b94
SP
2722}
2723
7707133c 2724static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2725{
73f394e6 2726 struct be_queue_info *cq;
3c8def97 2727 struct be_tx_obj *txo;
73f394e6 2728 struct be_eq_obj *eqo;
92bf14ab 2729 int status, i;
6b7c5b94 2730
e261768e 2731 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
dafc0fe3 2732
10ef9ab4
SP
2733 for_all_tx_queues(adapter, txo, i) {
2734 cq = &txo->cq;
2735 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2736 sizeof(struct be_eth_tx_compl));
2737 if (status)
2738 return status;
3c8def97 2739
827da44c
JS
2740 u64_stats_init(&txo->stats.sync);
2741 u64_stats_init(&txo->stats.sync_compl);
2742
10ef9ab4
SP
2743 /* If num_evt_qs is less than num_tx_qs, then more than
 2744	 * one txq shares an eq
2745 */
73f394e6
SP
2746 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2747 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2748 if (status)
2749 return status;
6b7c5b94 2750
10ef9ab4
SP
2751 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2752 sizeof(struct be_eth_wrb));
2753 if (status)
2754 return status;
6b7c5b94 2755
94d73aaa 2756 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2757 if (status)
2758 return status;
73f394e6
SP
2759
2760 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2761 eqo->idx);
3c8def97 2762 }
6b7c5b94 2763
d379142b
SP
2764 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2765 adapter->num_tx_qs);
10ef9ab4 2766 return 0;
6b7c5b94
SP
2767}
2768
10ef9ab4 2769static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2770{
2771 struct be_queue_info *q;
3abcdeda
SP
2772 struct be_rx_obj *rxo;
2773 int i;
2774
2775 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2776 q = &rxo->cq;
2777 if (q->created)
2778 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2779 be_queue_free(adapter, q);
ac6a0c4a
SP
2780 }
2781}
2782
10ef9ab4 2783static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2784{
10ef9ab4 2785 struct be_queue_info *eq, *cq;
3abcdeda
SP
2786 struct be_rx_obj *rxo;
2787 int rc, i;
6b7c5b94 2788
e261768e
SP
2789 adapter->num_rss_qs =
2790 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
92bf14ab 2791
71bb8bd0 2792 /* We'll use RSS only if atleast 2 RSS rings are supported. */
e261768e 2793 if (adapter->num_rss_qs < 2)
71bb8bd0
VV
2794 adapter->num_rss_qs = 0;
2795
2796 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2797
2798 /* When the interface is not capable of RSS rings (and there is no
2799 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2800 */
71bb8bd0
VV
2801 if (adapter->num_rx_qs == 0)
2802 adapter->num_rx_qs = 1;
92bf14ab 2803
6b7c5b94 2804 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2805 for_all_rx_queues(adapter, rxo, i) {
2806 rxo->adapter = adapter;
3abcdeda
SP
2807 cq = &rxo->cq;
2808 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2809 sizeof(struct be_eth_rx_compl));
3abcdeda 2810 if (rc)
10ef9ab4 2811 return rc;
3abcdeda 2812
827da44c 2813 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2814 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2815 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2816 if (rc)
10ef9ab4 2817 return rc;
3abcdeda 2818 }
6b7c5b94 2819
d379142b 2820 dev_info(&adapter->pdev->dev,
71bb8bd0 2821 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2822 return 0;
b628bde2
SP
2823}
2824
6b7c5b94
SP
2825static irqreturn_t be_intx(int irq, void *dev)
2826{
e49cc34f
SP
2827 struct be_eq_obj *eqo = dev;
2828 struct be_adapter *adapter = eqo->adapter;
2829 int num_evts = 0;
6b7c5b94 2830
d0b9cec3
SP
2831 /* IRQ is not expected when NAPI is scheduled as the EQ
2832 * will not be armed.
2833 * But, this can happen on Lancer INTx where it takes
 2834	 * a while to de-assert INTx or in BE2 where occasionally
2835 * an interrupt may be raised even when EQ is unarmed.
2836 * If NAPI is already scheduled, then counting & notifying
2837 * events will orphan them.
e49cc34f 2838 */
d0b9cec3 2839 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2840 num_evts = events_get(eqo);
d0b9cec3
SP
2841 __napi_schedule(&eqo->napi);
2842 if (num_evts)
2843 eqo->spurious_intr = 0;
2844 }
20947770 2845 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2846
d0b9cec3
SP
 2847	/* Return IRQ_HANDLED only for the first spurious intr
2848 * after a valid intr to stop the kernel from branding
2849 * this irq as a bad one!
e49cc34f 2850 */
d0b9cec3
SP
2851 if (num_evts || eqo->spurious_intr++ == 0)
2852 return IRQ_HANDLED;
2853 else
2854 return IRQ_NONE;
6b7c5b94
SP
2855}
2856
10ef9ab4 2857static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2858{
10ef9ab4 2859 struct be_eq_obj *eqo = dev;
6b7c5b94 2860
20947770 2861 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2862 napi_schedule(&eqo->napi);
6b7c5b94
SP
2863 return IRQ_HANDLED;
2864}
2865
2e588f84 2866static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2867{
e38b1706 2868 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2869}
2870
10ef9ab4 2871static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2872 int budget, int polling)
6b7c5b94 2873{
3abcdeda
SP
2874 struct be_adapter *adapter = rxo->adapter;
2875 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2876 struct be_rx_compl_info *rxcp;
6b7c5b94 2877 u32 work_done;
c30d7266 2878 u32 frags_consumed = 0;
6b7c5b94
SP
2879
2880 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2881 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2882 if (!rxcp)
2883 break;
2884
12004ae9
SP
 2885		/* Is it a flush compl that has no data? */
2886 if (unlikely(rxcp->num_rcvd == 0))
2887 goto loop_continue;
2888
 2889		/* Discard compl with partial DMA (Lancer B0) */
2890 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2891 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2892 goto loop_continue;
2893 }
2894
2895 /* On BE drop pkts that arrive due to imperfect filtering in
 2896		 * promiscuous mode on some SKUs
2897 */
2898 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2899 !lancer_chip(adapter))) {
10ef9ab4 2900 be_rx_compl_discard(rxo, rxcp);
12004ae9 2901 goto loop_continue;
64642811 2902 }
009dd872 2903
6384a4d0
SP
 2904		/* Don't do GRO when we're busy-polling */
2905 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2906 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2907 else
6384a4d0
SP
2908 be_rx_compl_process(rxo, napi, rxcp);
2909
12004ae9 2910loop_continue:
c30d7266 2911 frags_consumed += rxcp->num_rcvd;
2e588f84 2912 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2913 }
2914
10ef9ab4
SP
2915 if (work_done) {
2916 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2917
6384a4d0
SP
2918 /* When an rx-obj gets into post_starved state, just
2919 * let be_worker do the posting.
2920 */
2921 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2922 !rxo->rx_post_starved)
c30d7266
AK
2923 be_post_rx_frags(rxo, GFP_ATOMIC,
2924 max_t(u32, MAX_RX_POST,
2925 frags_consumed));
6b7c5b94 2926 }
10ef9ab4 2927
6b7c5b94
SP
2928 return work_done;
2929}
2930
152ffe5b 2931static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2932{
2933 switch (status) {
2934 case BE_TX_COMP_HDR_PARSE_ERR:
2935 tx_stats(txo)->tx_hdr_parse_err++;
2936 break;
2937 case BE_TX_COMP_NDMA_ERR:
2938 tx_stats(txo)->tx_dma_err++;
2939 break;
2940 case BE_TX_COMP_ACL_ERR:
2941 tx_stats(txo)->tx_spoof_check_err++;
2942 break;
2943 }
2944}
2945
152ffe5b 2946static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2947{
2948 switch (status) {
2949 case LANCER_TX_COMP_LSO_ERR:
2950 tx_stats(txo)->tx_tso_err++;
2951 break;
2952 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2953 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2954 tx_stats(txo)->tx_spoof_check_err++;
2955 break;
2956 case LANCER_TX_COMP_QINQ_ERR:
2957 tx_stats(txo)->tx_qinq_err++;
2958 break;
2959 case LANCER_TX_COMP_PARITY_ERR:
2960 tx_stats(txo)->tx_internal_parity_err++;
2961 break;
2962 case LANCER_TX_COMP_DMA_ERR:
2963 tx_stats(txo)->tx_dma_err++;
2964 break;
2965 }
2966}
2967
c8f64615
SP
2968static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2969 int idx)
6b7c5b94 2970{
c8f64615 2971 int num_wrbs = 0, work_done = 0;
152ffe5b 2972 struct be_tx_compl_info *txcp;
c8f64615 2973
152ffe5b
SB
2974 while ((txcp = be_tx_compl_get(txo))) {
2975 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2976 work_done++;
3c8def97 2977
152ffe5b 2978 if (txcp->status) {
512bb8a2 2979 if (lancer_chip(adapter))
152ffe5b 2980 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2981 else
152ffe5b 2982 be_update_tx_err(txo, txcp->status);
512bb8a2 2983 }
10ef9ab4 2984 }
6b7c5b94 2985
10ef9ab4
SP
2986 if (work_done) {
2987 be_cq_notify(adapter, txo->cq.id, true, work_done);
2988 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2989
10ef9ab4
SP
2990 /* As Tx wrbs have been freed up, wake up netdev queue
2991 * if it was stopped due to lack of tx wrbs. */
2992 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2993 be_can_txq_wake(txo)) {
10ef9ab4 2994 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2995 }
10ef9ab4
SP
2996
2997 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2998 tx_stats(txo)->tx_compl += work_done;
2999 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 3000 }
10ef9ab4 3001}
6b7c5b94 3002
f7062ee5
SP
3003#ifdef CONFIG_NET_RX_BUSY_POLL
3004static inline bool be_lock_napi(struct be_eq_obj *eqo)
3005{
3006 bool status = true;
3007
3008 spin_lock(&eqo->lock); /* BH is already disabled */
3009 if (eqo->state & BE_EQ_LOCKED) {
3010 WARN_ON(eqo->state & BE_EQ_NAPI);
3011 eqo->state |= BE_EQ_NAPI_YIELD;
3012 status = false;
3013 } else {
3014 eqo->state = BE_EQ_NAPI;
3015 }
3016 spin_unlock(&eqo->lock);
3017 return status;
3018}
3019
3020static inline void be_unlock_napi(struct be_eq_obj *eqo)
3021{
3022 spin_lock(&eqo->lock); /* BH is already disabled */
3023
3024 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3025 eqo->state = BE_EQ_IDLE;
3026
3027 spin_unlock(&eqo->lock);
3028}
3029
3030static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3031{
3032 bool status = true;
3033
3034 spin_lock_bh(&eqo->lock);
3035 if (eqo->state & BE_EQ_LOCKED) {
3036 eqo->state |= BE_EQ_POLL_YIELD;
3037 status = false;
3038 } else {
3039 eqo->state |= BE_EQ_POLL;
3040 }
3041 spin_unlock_bh(&eqo->lock);
3042 return status;
3043}
3044
3045static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3046{
3047 spin_lock_bh(&eqo->lock);
3048
3049 WARN_ON(eqo->state & (BE_EQ_NAPI));
3050 eqo->state = BE_EQ_IDLE;
3051
3052 spin_unlock_bh(&eqo->lock);
3053}
3054
3055static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3056{
3057 spin_lock_init(&eqo->lock);
3058 eqo->state = BE_EQ_IDLE;
3059}
3060
3061static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3062{
3063 local_bh_disable();
3064
3065 /* It's enough to just acquire napi lock on the eqo to stop
 3066	 * be_busy_poll() from processing any queues.
3067 */
3068 while (!be_lock_napi(eqo))
3069 mdelay(1);
3070
3071 local_bh_enable();
3072}
3073
3074#else /* CONFIG_NET_RX_BUSY_POLL */
3075
3076static inline bool be_lock_napi(struct be_eq_obj *eqo)
3077{
3078 return true;
3079}
3080
3081static inline void be_unlock_napi(struct be_eq_obj *eqo)
3082{
3083}
3084
3085static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3086{
3087 return false;
3088}
3089
3090static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3091{
3092}
3093
3094static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3095{
3096}
3097
3098static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3099{
3100}
3101#endif /* CONFIG_NET_RX_BUSY_POLL */
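/* The helpers above form a small lock state machine (BE_EQ_IDLE, BE_EQ_NAPI,
 * BE_EQ_POLL, plus the *_YIELD flags) so that the NAPI and busy-poll paths
 * never process the same EQ's queues concurrently; the path that loses the
 * race records a yield and backs off instead of spinning.
 */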
3102
68d7bdcb 3103int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3104{
3105 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3106 struct be_adapter *adapter = eqo->adapter;
0b545a62 3107 int max_work = 0, work, i, num_evts;
6384a4d0 3108 struct be_rx_obj *rxo;
a4906ea0 3109 struct be_tx_obj *txo;
20947770 3110 u32 mult_enc = 0;
f31e50a8 3111
0b545a62
SP
3112 num_evts = events_get(eqo);
3113
a4906ea0
SP
3114 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3115 be_process_tx(adapter, txo, i);
f31e50a8 3116
6384a4d0
SP
3117 if (be_lock_napi(eqo)) {
3118 /* This loop will iterate twice for EQ0 in which
3119 * completions of the last RXQ (default one) are also processed
3120 * For other EQs the loop iterates only once
3121 */
3122 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3123 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3124 max_work = max(work, max_work);
3125 }
3126 be_unlock_napi(eqo);
3127 } else {
3128 max_work = budget;
10ef9ab4 3129 }
6b7c5b94 3130
10ef9ab4
SP
3131 if (is_mcc_eqo(eqo))
3132 be_process_mcc(adapter);
93c86700 3133
10ef9ab4
SP
3134 if (max_work < budget) {
3135 napi_complete(napi);
20947770
PR
3136
 3137		/* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
 3138		 * delay via a delay-multiplier encoding value
3139 */
3140 if (skyhawk_chip(adapter))
3141 mult_enc = be_get_eq_delay_mult_enc(eqo);
3142
3143 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3144 mult_enc);
10ef9ab4
SP
3145 } else {
3146 /* As we'll continue in polling mode, count and clear events */
20947770 3147 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3148 }
10ef9ab4 3149 return max_work;
6b7c5b94
SP
3150}
3151
6384a4d0
SP
3152#ifdef CONFIG_NET_RX_BUSY_POLL
3153static int be_busy_poll(struct napi_struct *napi)
3154{
3155 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3156 struct be_adapter *adapter = eqo->adapter;
3157 struct be_rx_obj *rxo;
3158 int i, work = 0;
3159
3160 if (!be_lock_busy_poll(eqo))
3161 return LL_FLUSH_BUSY;
3162
3163 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3164 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3165 if (work)
3166 break;
3167 }
3168
3169 be_unlock_busy_poll(eqo);
3170 return work;
3171}
3172#endif
3173
f67ef7ba 3174void be_detect_error(struct be_adapter *adapter)
7c185276 3175{
e1cfb67a
PR
3176 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3177 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3178 u32 i;
eb0eecc1 3179 struct device *dev = &adapter->pdev->dev;
7c185276 3180
954f6825 3181 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3182 return;
3183
e1cfb67a
PR
3184 if (lancer_chip(adapter)) {
3185 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3186 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3187 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3188 sliport_err1 = ioread32(adapter->db +
748b539a 3189 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3190 sliport_err2 = ioread32(adapter->db +
748b539a 3191 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
 3192			/* Do not log error messages if it's a FW reset */
3193 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3194 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3195 dev_info(dev, "Firmware update in progress\n");
3196 } else {
eb0eecc1
SK
3197 dev_err(dev, "Error detected in the card\n");
3198 dev_err(dev, "ERR: sliport status 0x%x\n",
3199 sliport_status);
3200 dev_err(dev, "ERR: sliport error1 0x%x\n",
3201 sliport_err1);
3202 dev_err(dev, "ERR: sliport error2 0x%x\n",
3203 sliport_err2);
3204 }
e1cfb67a
PR
3205 }
3206 } else {
25848c90
SR
3207 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3208 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3209 ue_lo_mask = ioread32(adapter->pcicfg +
3210 PCICFG_UE_STATUS_LOW_MASK);
3211 ue_hi_mask = ioread32(adapter->pcicfg +
3212 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3213
f67ef7ba
PR
3214 ue_lo = (ue_lo & ~ue_lo_mask);
3215 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3216
eb0eecc1
SK
3217 /* On certain platforms BE hardware can indicate spurious UEs.
3218 * Allow HW to stop working completely in case of a real UE.
 3219		 * Hence we do not set hw_error on UE detection.
3220 */
f67ef7ba 3221
eb0eecc1 3222 if (ue_lo || ue_hi) {
eb0eecc1
SK
3223 dev_err(dev,
3224 "Unrecoverable Error detected in the adapter");
3225 dev_err(dev, "Please reboot server to recover");
3226 if (skyhawk_chip(adapter))
954f6825
VD
3227 be_set_error(adapter, BE_ERROR_UE);
3228
eb0eecc1
SK
3229 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3230 if (ue_lo & 1)
3231 dev_err(dev, "UE: %s bit set\n",
3232 ue_status_low_desc[i]);
3233 }
3234 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3235 if (ue_hi & 1)
3236 dev_err(dev, "UE: %s bit set\n",
3237 ue_status_hi_desc[i]);
3238 }
7c185276
AK
3239 }
3240 }
7c185276
AK
3241}
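/* Decoding example (illustrative): an unmasked ue_lo of 0x9 has bits 0 and
 * 3 set, so the loops above would log the names at indexes 0 and 3 of
 * ue_status_low_desc[], one line per set bit.
 */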
3242
8d56ff11
SP
3243static void be_msix_disable(struct be_adapter *adapter)
3244{
ac6a0c4a 3245 if (msix_enabled(adapter)) {
8d56ff11 3246 pci_disable_msix(adapter->pdev);
ac6a0c4a 3247 adapter->num_msix_vec = 0;
68d7bdcb 3248 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3249 }
3250}
3251
c2bba3df 3252static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3253{
e261768e 3254 unsigned int i, num_vec, max_roce_eqs;
d379142b 3255 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3256
ce7faf0a
SP
3257 /* If RoCE is supported, program the max number of vectors that
3258 * could be used for NIC and RoCE, else, just program the number
3259 * we'll use initially.
92bf14ab 3260 */
e261768e
SP
3261 if (be_roce_supported(adapter)) {
3262 max_roce_eqs =
3263 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3264 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3265 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3266 } else {
3267 num_vec = max(adapter->cfg_num_rx_irqs,
3268 adapter->cfg_num_tx_irqs);
3269 }
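	/* Example (hypothetical resource counts): with be_max_func_eqs = 16
	 * and be_max_nic_eqs = 8 on a 16-CPU host, max_roce_eqs = 8, so
	 * be_max_any_irqs() + 8 vectors are requested; if more than
	 * MIN_MSIX_VECTORS are granted, half go to RoCE below.
	 */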
3abcdeda 3270
ac6a0c4a 3271 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3272 adapter->msix_entries[i].entry = i;
3273
7dc4c064
AG
3274 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3275 MIN_MSIX_VECTORS, num_vec);
3276 if (num_vec < 0)
3277 goto fail;
92bf14ab 3278
92bf14ab
SP
3279 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3280 adapter->num_msix_roce_vec = num_vec / 2;
3281 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3282 adapter->num_msix_roce_vec);
3283 }
3284
3285 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3286
3287 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3288 adapter->num_msix_vec);
c2bba3df 3289 return 0;
7dc4c064
AG
3290
3291fail:
3292 dev_warn(dev, "MSIx enable failed\n");
3293
3294 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3295 if (be_virtfn(adapter))
7dc4c064
AG
3296 return num_vec;
3297 return 0;
6b7c5b94
SP
3298}
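/* Editor's sketch of the vector-count arithmetic in be_msix_enable()
 * above, under stated assumptions: "nic_irqs" stands in for
 * be_max_any_irqs() (with RoCE) or max(cfg_num_rx_irqs, cfg_num_tx_irqs)
 * (without), and the RoCE share is capped at one EQ per online CPU.
 */
#include <stdbool.h>

unsigned int demo_msix_vectors(bool roce_supported,
			       unsigned int max_func_eqs,
			       unsigned int max_nic_eqs,
			       unsigned int nic_irqs,
			       unsigned int online_cpus)
{
	unsigned int roce_eqs;

	if (!roce_supported)
		return nic_irqs;

	roce_eqs = max_func_eqs - max_nic_eqs;
	if (roce_eqs > online_cpus)
		roce_eqs = online_cpus;	/* min(max_roce_eqs, num_online_cpus()) */
	return nic_irqs + roce_eqs;
}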
3299
fe6d2a38 3300static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3301 struct be_eq_obj *eqo)
b628bde2 3302{
f2f781a7 3303 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3304}
6b7c5b94 3305
b628bde2
SP
3306static int be_msix_register(struct be_adapter *adapter)
3307{
10ef9ab4
SP
3308 struct net_device *netdev = adapter->netdev;
3309 struct be_eq_obj *eqo;
3310 int status, i, vec;
6b7c5b94 3311
10ef9ab4
SP
3312 for_all_evt_queues(adapter, eqo, i) {
3313 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3314 vec = be_msix_vec_get(adapter, eqo);
3315 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3316 if (status)
3317 goto err_msix;
d658d98a
PR
3318
3319 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3320 }
b628bde2 3321
6b7c5b94 3322 return 0;
3abcdeda 3323err_msix:
6e3cd5fa
VD
3324 for (i--; i >= 0; i--) {
3325 eqo = &adapter->eq_obj[i];
10ef9ab4 3326 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3327 }
10ef9ab4 3328 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3329 status);
ac6a0c4a 3330 be_msix_disable(adapter);
6b7c5b94
SP
3331 return status;
3332}
3333
3334static int be_irq_register(struct be_adapter *adapter)
3335{
3336 struct net_device *netdev = adapter->netdev;
3337 int status;
3338
ac6a0c4a 3339 if (msix_enabled(adapter)) {
6b7c5b94
SP
3340 status = be_msix_register(adapter);
3341 if (status == 0)
3342 goto done;
ba343c77 3343 /* INTx is not supported for VF */
18c57c74 3344 if (be_virtfn(adapter))
ba343c77 3345 return status;
6b7c5b94
SP
3346 }
3347
e49cc34f 3348 /* INTx: only the first EQ is used */
6b7c5b94
SP
3349 netdev->irq = adapter->pdev->irq;
3350 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3351 &adapter->eq_obj[0]);
6b7c5b94
SP
3352 if (status) {
3353 dev_err(&adapter->pdev->dev,
3354 "INTx request IRQ failed - err %d\n", status);
3355 return status;
3356 }
3357done:
3358 adapter->isr_registered = true;
3359 return 0;
3360}
3361
3362static void be_irq_unregister(struct be_adapter *adapter)
3363{
3364 struct net_device *netdev = adapter->netdev;
10ef9ab4 3365 struct be_eq_obj *eqo;
d658d98a 3366 int i, vec;
6b7c5b94
SP
3367
3368 if (!adapter->isr_registered)
3369 return;
3370
3371 /* INTx */
ac6a0c4a 3372 if (!msix_enabled(adapter)) {
e49cc34f 3373 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3374 goto done;
3375 }
3376
3377 /* MSIx */
d658d98a
PR
3378 for_all_evt_queues(adapter, eqo, i) {
3379 vec = be_msix_vec_get(adapter, eqo);
3380 irq_set_affinity_hint(vec, NULL);
3381 free_irq(vec, eqo);
3382 }
3abcdeda 3383
6b7c5b94
SP
3384done:
3385 adapter->isr_registered = false;
6b7c5b94
SP
3386}
3387
10ef9ab4 3388static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3389{
62219066 3390 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3391 struct be_queue_info *q;
3392 struct be_rx_obj *rxo;
3393 int i;
3394
3395 for_all_rx_queues(adapter, rxo, i) {
3396 q = &rxo->q;
3397 if (q->created) {
99b44304
KA
3398 /* If RXQs are destroyed while in an "out of buffer"
3399 * state, there is a possibility of an HW stall on
3400 * Lancer. So, post 64 buffers to each queue to relieve
3401 * the "out of buffer" condition.
3402 * Make sure there's space in the RXQ before posting.
3403 */
3404 if (lancer_chip(adapter)) {
3405 be_rx_cq_clean(rxo);
3406 if (atomic_read(&q->used) == 0)
3407 be_post_rx_frags(rxo, GFP_KERNEL,
3408 MAX_RX_POST);
3409 }
3410
482c9e79 3411 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3412 be_rx_cq_clean(rxo);
99b44304 3413 be_rxq_clean(rxo);
482c9e79 3414 }
10ef9ab4 3415 be_queue_free(adapter, q);
482c9e79 3416 }
62219066
AK
3417
3418 if (rss->rss_flags) {
3419 rss->rss_flags = RSS_ENABLE_NONE;
3420 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3421 128, rss->rss_hkey);
3422 }
482c9e79
SP
3423}
3424
bcc84140
KA
3425static void be_disable_if_filters(struct be_adapter *adapter)
3426{
3427 be_cmd_pmac_del(adapter, adapter->if_handle,
3428 adapter->pmac_id[0], 0);
3429
3430 be_clear_uc_list(adapter);
3431
3432 /* The IFACE flags are enabled in the open path and cleared
3433 * in the close path. When a VF gets detached from the host and
3434 * assigned to a VM the following happens:
3435 * - VF's IFACE flags get cleared in the detach path
3436 * - IFACE create is issued by the VF in the attach path
3437 * Due to a bug in the BE3/Skyhawk-R FW
3438 * (Lancer FW doesn't have the bug), the IFACE capability flags
3439 * specified along with the IFACE create cmd issued by a VF are not
3440 * honoured by FW. As a consequence, if a *new* driver
3441 * (that enables/disables IFACE flags in open/close)
3442 * is loaded in the host and an *old* driver is used by a VM/VF,
3443 * the IFACE gets created *without* the needed flags.
3444 * To avoid this, disable RX-filter flags only for Lancer.
3445 */
3446 if (lancer_chip(adapter)) {
3447 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3448 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3449 }
3450}
3451
889cd4b2
SP
3452static int be_close(struct net_device *netdev)
3453{
3454 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3455 struct be_eq_obj *eqo;
3456 int i;
889cd4b2 3457
e1ad8e33
KA
3458 /* This protection is needed as be_close() may be called even when the
3459 * adapter is in cleared state (after eeh perm failure)
3460 */
3461 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3462 return 0;
3463
bcc84140
KA
3464 be_disable_if_filters(adapter);
3465
dff345c5
IV
3466 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3467 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3468 napi_disable(&eqo->napi);
6384a4d0
SP
3469 be_disable_busy_poll(eqo);
3470 }
71237b6f 3471 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3472 }
a323d9bf
SP
3473
3474 be_async_mcc_disable(adapter);
3475
3476 /* Wait for all pending tx completions to arrive so that
3477 * all tx skbs are freed.
3478 */
fba87559 3479 netif_tx_disable(netdev);
6e1f9975 3480 be_tx_compl_clean(adapter);
a323d9bf
SP
3481
3482 be_rx_qs_destroy(adapter);
d11a347d 3483
a323d9bf 3484 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3485 if (msix_enabled(adapter))
3486 synchronize_irq(be_msix_vec_get(adapter, eqo));
3487 else
3488 synchronize_irq(netdev->irq);
3489 be_eq_clean(eqo);
63fcb27f
PR
3490 }
3491
889cd4b2
SP
3492 be_irq_unregister(adapter);
3493
482c9e79
SP
3494 return 0;
3495}
3496
10ef9ab4 3497static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3498{
1dcf7b1c
ED
3499 struct rss_info *rss = &adapter->rss_info;
3500 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3501 struct be_rx_obj *rxo;
e9008ee9 3502 int rc, i, j;
482c9e79
SP
3503
3504 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3505 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3506 sizeof(struct be_eth_rx_d));
3507 if (rc)
3508 return rc;
3509 }
3510
71bb8bd0
VV
3511 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3512 rxo = default_rxo(adapter);
3513 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3514 rx_frag_size, adapter->if_handle,
3515 false, &rxo->rss_id);
3516 if (rc)
3517 return rc;
3518 }
10ef9ab4
SP
3519
3520 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3521 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3522 rx_frag_size, adapter->if_handle,
3523 true, &rxo->rss_id);
482c9e79
SP
3524 if (rc)
3525 return rc;
3526 }
3527
3528 if (be_multi_rxq(adapter)) {
71bb8bd0 3529 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3530 for_all_rss_queues(adapter, rxo, i) {
e2557877 3531 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3532 break;
e2557877
VD
3533 rss->rsstable[j + i] = rxo->rss_id;
3534 rss->rss_queue[j + i] = i;
e9008ee9
PR
3535 }
3536 }
e2557877
VD
3537 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3538 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3539
3540 if (!BEx_chip(adapter))
e2557877
VD
3541 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3542 RSS_ENABLE_UDP_IPV6;
62219066
AK
3543
3544 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3545 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3546 RSS_INDIR_TABLE_LEN, rss_key);
3547 if (rc) {
3548 rss->rss_flags = RSS_ENABLE_NONE;
3549 return rc;
3550 }
3551
3552 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3553 } else {
3554 /* Disable RSS, if only default RX Q is created */
e2557877 3555 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3556 }
594ad54a 3557
e2557877 3558
b02e60c8
SR
3559 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3560 * which is a queue empty condition
3561 */
10ef9ab4 3562 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3563 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3564
889cd4b2
SP
3565 return 0;
3566}
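/* Editor's sketch: the round-robin fill of the 128-entry RSS indirection
 * table from be_rx_qs_create() above, as a standalone program. Queue ids
 * here are simply the queue index; the real ids come back from
 * be_cmd_rxq_create().
 */
#include <stdio.h>

#define DEMO_INDIR_LEN 128

int main(void)
{
	int table[DEMO_INDIR_LEN];
	int num_rss_qs = 6, i, j;

	for (j = 0; j < DEMO_INDIR_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs; i++) {
			if (j + i >= DEMO_INDIR_LEN)
				break;
			table[j + i] = i;	/* rxo->rss_id stand-in */
		}

	for (i = 0; i < DEMO_INDIR_LEN; i++)
		printf("%d%c", table[i], (i + 1) % 16 ? ' ' : '\n');
	return 0;
}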
3567
bcc84140
KA
3568static int be_enable_if_filters(struct be_adapter *adapter)
3569{
3570 int status;
3571
c1bb0a55 3572 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
bcc84140
KA
3573 if (status)
3574 return status;
3575
3576 /* For BE3 VFs, the PF programs the initial MAC address */
3577 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3578 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3579 adapter->if_handle,
3580 &adapter->pmac_id[0], 0);
3581 if (status)
3582 return status;
3583 }
3584
3585 if (adapter->vlans_added)
3586 be_vid_config(adapter);
3587
3588 be_set_rx_mode(adapter->netdev);
3589
3590 return 0;
3591}
3592
6b7c5b94
SP
3593static int be_open(struct net_device *netdev)
3594{
3595 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3596 struct be_eq_obj *eqo;
3abcdeda 3597 struct be_rx_obj *rxo;
10ef9ab4 3598 struct be_tx_obj *txo;
b236916a 3599 u8 link_status;
3abcdeda 3600 int status, i;
5fb379ee 3601
10ef9ab4 3602 status = be_rx_qs_create(adapter);
482c9e79
SP
3603 if (status)
3604 goto err;
3605
bcc84140
KA
3606 status = be_enable_if_filters(adapter);
3607 if (status)
3608 goto err;
3609
c2bba3df
SK
3610 status = be_irq_register(adapter);
3611 if (status)
3612 goto err;
5fb379ee 3613
10ef9ab4 3614 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3615 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3616
10ef9ab4
SP
3617 for_all_tx_queues(adapter, txo, i)
3618 be_cq_notify(adapter, txo->cq.id, true, 0);
3619
7a1e9b20
SP
3620 be_async_mcc_enable(adapter);
3621
10ef9ab4
SP
3622 for_all_evt_queues(adapter, eqo, i) {
3623 napi_enable(&eqo->napi);
6384a4d0 3624 be_enable_busy_poll(eqo);
20947770 3625 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3626 }
04d3d624 3627 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3628
323ff71e 3629 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3630 if (!status)
3631 be_link_status_update(adapter, link_status);
3632
fba87559 3633 netif_tx_start_all_queues(netdev);
c9c47142 3634 if (skyhawk_chip(adapter))
bde6b7cd 3635 udp_tunnel_get_rx_info(netdev);
c5abe7c0 3636
889cd4b2
SP
3637 return 0;
3638err:
3639 be_close(adapter->netdev);
3640 return -EIO;
5fb379ee
SP
3641}
3642
f7062ee5
SP
3643static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3644{
3645 u32 addr;
3646
3647 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3648
3649 mac[5] = (u8)(addr & 0xFF);
3650 mac[4] = (u8)((addr >> 8) & 0xFF);
3651 mac[3] = (u8)((addr >> 16) & 0xFF);
3652 /* Use the OUI from the current MAC address */
3653 memcpy(mac, adapter->netdev->dev_addr, 3);
3654}
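/* Editor's sketch of the scheme above and in be_vf_eth_addr_config():
 * keep the PF's OUI, derive the low 24 bits from a hash of the PF MAC,
 * then hand out consecutive VF addresses by bumping mac[5]. A trivial
 * stand-in hash is used instead of the kernel's jhash(), and the PF MAC
 * is an arbitrary example value.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t demo_hash(const uint8_t *p, int len)
{
	uint32_t h = 0;

	while (len--)
		h = h * 31 + *p++;	/* not jhash; illustration only */
	return h;
}

int main(void)
{
	uint8_t pf_mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t mac[6];
	uint32_t addr = demo_hash(pf_mac, 6);
	int vf;

	memcpy(mac, pf_mac, 3);			/* keep the OUI */
	mac[3] = (addr >> 16) & 0xFF;
	mac[4] = (addr >> 8) & 0xFF;
	mac[5] = addr & 0xFF;

	for (vf = 0; vf < 3; vf++, mac[5]++)
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}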
3655
6d87f5c3
AK
3656/*
3657 * Generate a seed MAC address from the PF MAC Address using jhash.
3658 * MAC addresses for VFs are assigned incrementally starting from the seed.
3659 * These addresses are programmed in the ASIC by the PF and the VF driver
3660 * queries for the MAC address during its probe.
3661 */
4c876616 3662static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3663{
f9449ab7 3664 u32 vf;
3abcdeda 3665 int status = 0;
6d87f5c3 3666 u8 mac[ETH_ALEN];
11ac75ed 3667 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3668
3669 be_vf_eth_addr_generate(adapter, mac);
3670
11ac75ed 3671 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3672 if (BEx_chip(adapter))
590c391d 3673 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3674 vf_cfg->if_handle,
3675 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3676 else
3677 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3678 vf + 1);
590c391d 3679
6d87f5c3
AK
3680 if (status)
3681 dev_err(&adapter->pdev->dev,
748b539a
SP
3682 "Mac address assignment failed for VF %d\n",
3683 vf);
6d87f5c3 3684 else
11ac75ed 3685 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3686
3687 mac[5] += 1;
3688 }
3689 return status;
3690}
3691
4c876616
SP
3692static int be_vfs_mac_query(struct be_adapter *adapter)
3693{
3694 int status, vf;
3695 u8 mac[ETH_ALEN];
3696 struct be_vf_cfg *vf_cfg;
4c876616
SP
3697
3698 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3699 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3700 mac, vf_cfg->if_handle,
3701 false, vf+1);
4c876616
SP
3702 if (status)
3703 return status;
3704 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3705 }
3706 return 0;
3707}
3708
f9449ab7 3709static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3710{
11ac75ed 3711 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3712 u32 vf;
3713
257a3feb 3714 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3715 dev_warn(&adapter->pdev->dev,
3716 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3717 goto done;
3718 }
3719
b4c1df93
SP
3720 pci_disable_sriov(adapter->pdev);
3721
11ac75ed 3722 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3723 if (BEx_chip(adapter))
11ac75ed
SP
3724 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3725 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3726 else
3727 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3728 vf + 1);
f9449ab7 3729
11ac75ed
SP
3730 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3731 }
39f1d94d
SP
3732done:
3733 kfree(adapter->vf_cfg);
3734 adapter->num_vfs = 0;
f174c7ec 3735 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3736}
3737
7707133c
SP
3738static void be_clear_queues(struct be_adapter *adapter)
3739{
3740 be_mcc_queues_destroy(adapter);
3741 be_rx_cqs_destroy(adapter);
3742 be_tx_queues_destroy(adapter);
3743 be_evt_queues_destroy(adapter);
3744}
3745
68d7bdcb 3746static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3747{
191eb756
SP
3748 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3749 cancel_delayed_work_sync(&adapter->work);
3750 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3751 }
68d7bdcb
SP
3752}
3753
eb7dd46c
SP
3754static void be_cancel_err_detection(struct be_adapter *adapter)
3755{
3756 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3757 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3758 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3759 }
3760}
3761
c9c47142
SP
3762static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3763{
630f4b70
SB
3764 struct net_device *netdev = adapter->netdev;
3765
c9c47142
SP
3766 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3767 be_cmd_manage_iface(adapter, adapter->if_handle,
3768 OP_CONVERT_TUNNEL_TO_NORMAL);
3769
3770 if (adapter->vxlan_port)
3771 be_cmd_set_vxlan_port(adapter, 0);
3772
3773 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3774 adapter->vxlan_port = 0;
630f4b70
SB
3775
3776 netdev->hw_enc_features = 0;
3777 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3778 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142
SP
3779}
3780
b9263cbf
SR
3781static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3782 struct be_resources *vft_res)
f2858738
VV
3783{
3784 struct be_resources res = adapter->pool_res;
b9263cbf
SR
3785 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3786 struct be_resources res_mod = {0};
f2858738
VV
3787 u16 num_vf_qs = 1;
3788
de2b1e03
SK
3789 /* Distribute the queue resources among the PF and its VFs */
3790 if (num_vfs) {
3791 /* Divide the rx queues evenly among the VFs and the PF, capped
3792 * at VF-EQ-count. Any remainder queues belong to the PF.
3793 */
ee9ad280
SB
3794 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3795 res.max_rss_qs / (num_vfs + 1));
f2858738 3796
de2b1e03
SK
3797 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
3798 * RSS Tables per port. Provide RSS on VFs, only if number of
3799 * VFs requested is less than its PF Pool's RSS Tables limit.
f2858738 3800 */
de2b1e03 3801 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
f2858738
VV
3802 num_vf_qs = 1;
3803 }
b9263cbf
SR
3804
3805 /* GET_PROFILE_CONFIG returns a resource whose modifiable fields are
3806 * set to all '1's; those fields can be changed via SET_PROFILE_CONFIG.
3807 */
de2b1e03
SK
3808 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
3809 RESOURCE_MODIFIABLE, 0);
b9263cbf
SR
3810
3811 /* If RSS IFACE capability flags are modifiable for a VF, set the
3812 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3813 * more than 1 RSSQ is available for a VF.
3814 * Otherwise, provision only 1 queue pair for VF.
3815 */
3816 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3817 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3818 if (num_vf_qs > 1) {
3819 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3820 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3821 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3822 } else {
3823 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3824 BE_IF_FLAGS_DEFQ_RSS);
3825 }
3826 } else {
3827 num_vf_qs = 1;
3828 }
3829
3830 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3831 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3832 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3833 }
3834
3835 vft_res->vf_if_cap_flags = vf_if_cap_flags;
3836 vft_res->max_rx_qs = num_vf_qs;
3837 vft_res->max_rss_qs = num_vf_qs;
3838 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
3839 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
3840
3841 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3842 * among the PF and its VFs, if the fields are changeable
3843 */
3844 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3845 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
3846
3847 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3848 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
3849
3850 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3851 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
3852
3853 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3854 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
f2858738
VV
3855}
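/* Editor's sketch with assumed numbers: the per-function RX queue share
 * computed in be_calculate_vf_res() above. With max_rss_qs = 32 and a
 * VF-EQ cap of 4 (a SH_VF_MAX_NIC_EQS stand-in), num_vfs = 7 yields
 * min(4, 32 / 8) = 4 queues per function, so VFs keep RSS; num_vfs = 31
 * yields min(4, 32 / 32) = 1, so VFs drop to a single queue pair and
 * the RSS IFACE flags are cleared.
 */
unsigned int demo_vf_queue_share(unsigned int max_rss_qs,
				 unsigned int vf_eq_cap,
				 unsigned int num_vfs)
{
	unsigned int share = max_rss_qs / (num_vfs + 1);

	return share < vf_eq_cap ? share : vf_eq_cap;
}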
3856
b05004ad
SK
3857static int be_clear(struct be_adapter *adapter)
3858{
f2858738 3859 struct pci_dev *pdev = adapter->pdev;
b9263cbf 3860 struct be_resources vft_res = {0};
f2858738 3861
68d7bdcb 3862 be_cancel_worker(adapter);
191eb756 3863
11ac75ed 3864 if (sriov_enabled(adapter))
f9449ab7
SP
3865 be_vf_clear(adapter);
3866
bec84e6b
VV
3867 /* Re-configure FW to distribute resources evenly across max-supported
3868 * number of VFs, only when VFs are not already enabled.
3869 */
ace40aff
VV
3870 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3871 !pci_vfs_assigned(pdev)) {
b9263cbf
SR
3872 be_calculate_vf_res(adapter,
3873 pci_sriov_get_totalvfs(pdev),
3874 &vft_res);
bec84e6b 3875 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738 3876 pci_sriov_get_totalvfs(pdev),
b9263cbf 3877 &vft_res);
f2858738 3878 }
bec84e6b 3879
c9c47142 3880 be_disable_vxlan_offloads(adapter);
bcc84140
KA
3881 kfree(adapter->pmac_id);
3882 adapter->pmac_id = NULL;
fbc13f01 3883
f9449ab7 3884 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3885
7707133c 3886 be_clear_queues(adapter);
a54769f5 3887
10ef9ab4 3888 be_msix_disable(adapter);
e1ad8e33 3889 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3890 return 0;
3891}
3892
4c876616 3893static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3894{
92bf14ab 3895 struct be_resources res = {0};
bcc84140 3896 u32 cap_flags, en_flags, vf;
4c876616 3897 struct be_vf_cfg *vf_cfg;
0700d816 3898 int status;
abb93951 3899
0700d816 3900 /* If a FW profile exists, then cap_flags are updated */
c1bb0a55 3901 cap_flags = BE_VF_IF_EN_FLAGS;
abb93951 3902
4c876616 3903 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab 3904 if (!BE3_chip(adapter)) {
de2b1e03
SK
3905 status = be_cmd_get_profile_config(adapter, &res, NULL,
3906 ACTIVE_PROFILE_TYPE,
f2858738 3907 RESOURCE_LIMITS,
92bf14ab 3908 vf + 1);
435452aa 3909 if (!status) {
92bf14ab 3910 cap_flags = res.if_cap_flags;
435452aa
VV
3911 /* Prevent VFs from enabling VLAN promiscuous
3912 * mode
3913 */
3914 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3915 }
92bf14ab 3916 }
4c876616 3917
c1bb0a55
VD
3918 /* PF should enable IF flags during proxy if_create call */
3919 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
bcc84140
KA
3920 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3921 &vf_cfg->if_handle, vf + 1);
4c876616 3922 if (status)
0700d816 3923 return status;
4c876616 3924 }
0700d816
KA
3925
3926 return 0;
abb93951
PR
3927}
3928
39f1d94d 3929static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3930{
11ac75ed 3931 struct be_vf_cfg *vf_cfg;
30128031
SP
3932 int vf;
3933
39f1d94d
SP
3934 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3935 GFP_KERNEL);
3936 if (!adapter->vf_cfg)
3937 return -ENOMEM;
3938
11ac75ed
SP
3939 for_all_vfs(adapter, vf_cfg, vf) {
3940 vf_cfg->if_handle = -1;
3941 vf_cfg->pmac_id = -1;
30128031 3942 }
39f1d94d 3943 return 0;
30128031
SP
3944}
3945
f9449ab7
SP
3946static int be_vf_setup(struct be_adapter *adapter)
3947{
c502224e 3948 struct device *dev = &adapter->pdev->dev;
11ac75ed 3949 struct be_vf_cfg *vf_cfg;
4c876616 3950 int status, old_vfs, vf;
e7bcbd7b 3951 bool spoofchk;
39f1d94d 3952
257a3feb 3953 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3954
3955 status = be_vf_setup_init(adapter);
3956 if (status)
3957 goto err;
30128031 3958
4c876616
SP
3959 if (old_vfs) {
3960 for_all_vfs(adapter, vf_cfg, vf) {
3961 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3962 if (status)
3963 goto err;
3964 }
f9449ab7 3965
4c876616
SP
3966 status = be_vfs_mac_query(adapter);
3967 if (status)
3968 goto err;
3969 } else {
bec84e6b
VV
3970 status = be_vfs_if_create(adapter);
3971 if (status)
3972 goto err;
3973
39f1d94d
SP
3974 status = be_vf_eth_addr_config(adapter);
3975 if (status)
3976 goto err;
3977 }
f9449ab7 3978
11ac75ed 3979 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3980 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3981 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3982 vf + 1);
3983 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3984 status = be_cmd_set_fn_privileges(adapter,
435452aa 3985 vf_cfg->privileges |
04a06028
SP
3986 BE_PRIV_FILTMGMT,
3987 vf + 1);
435452aa
VV
3988 if (!status) {
3989 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3990 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3991 vf);
435452aa 3992 }
04a06028
SP
3993 }
3994
0f77ba73
RN
3995 /* Allow full available bandwidth */
3996 if (!old_vfs)
3997 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3998
e7bcbd7b
KA
3999 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4000 vf_cfg->if_handle, NULL,
4001 &spoofchk);
4002 if (!status)
4003 vf_cfg->spoofchk = spoofchk;
4004
bdce2ad7 4005 if (!old_vfs) {
0599863d 4006 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
4007 be_cmd_set_logical_link_config(adapter,
4008 IFLA_VF_LINK_STATE_AUTO,
4009 vf+1);
4010 }
f9449ab7 4011 }
b4c1df93
SP
4012
4013 if (!old_vfs) {
4014 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4015 if (status) {
4016 dev_err(dev, "SRIOV enable failed\n");
4017 adapter->num_vfs = 0;
4018 goto err;
4019 }
4020 }
f174c7ec
VV
4021
4022 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
4023 return 0;
4024err:
4c876616
SP
4025 dev_err(dev, "VF setup failed\n");
4026 be_vf_clear(adapter);
f9449ab7
SP
4027 return status;
4028}
4029
f93f160b
VV
4030/* Converting function_mode bits on BE3 to SH mc_type enums */
4031
4032static u8 be_convert_mc_type(u32 function_mode)
4033{
66064dbc 4034 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4035 return vNIC1;
66064dbc 4036 else if (function_mode & QNQ_MODE)
f93f160b
VV
4037 return FLEX10;
4038 else if (function_mode & VNIC_MODE)
4039 return vNIC2;
4040 else if (function_mode & UMC_ENABLED)
4041 return UMC;
4042 else
4043 return MC_NONE;
4044}
4045
92bf14ab
SP
4046/* On BE2/BE3 FW does not suggest the supported limits */
4047static void BEx_get_resources(struct be_adapter *adapter,
4048 struct be_resources *res)
4049{
bec84e6b 4050 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4051
4052 if (be_physfn(adapter))
4053 res->max_uc_mac = BE_UC_PMAC_COUNT;
4054 else
4055 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4056
f93f160b
VV
4057 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4058
4059 if (be_is_mc(adapter)) {
4060 /* Assuming that there are 4 channels per port
4061 * when multi-channel is enabled
4062 */
4063 if (be_is_qnq_mode(adapter))
4064 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4065 else
4066 /* In a non-qnq multichannel mode, the pvid
4067 * takes up one vlan entry
4068 */
4069 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4070 } else {
92bf14ab 4071 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4072 }
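/* Editor's note, assuming BE_NUM_VLANS_SUPPORTED = 64: the multi-channel
 * math above leaves 64 / 8 = 8 VLAN filter entries per function in QnQ
 * mode and 64 / 4 - 1 = 15 in non-QnQ multi-channel mode (one entry goes
 * to the pvid); single-channel functions keep all 64.
 */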
4073
92bf14ab
SP
4074 res->max_mcast_mac = BE_MAX_MC;
4075
a5243dab
VV
4076 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4077 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4078 * *only* if it is RSS-capable.
4079 */
4080 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4081 be_virtfn(adapter) ||
4082 (be_is_mc(adapter) &&
4083 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4084 res->max_tx_qs = 1;
a28277dc
SR
4085 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4086 struct be_resources super_nic_res = {0};
4087
4088 /* On a SuperNIC profile, the driver needs to use the
4089 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4090 */
de2b1e03
SK
4091 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4092 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4093 0);
a28277dc
SR
4094 /* Some old versions of BE3 FW don't report max_tx_qs value */
4095 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4096 } else {
92bf14ab 4097 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4098 }
92bf14ab
SP
4099
4100 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4101 !use_sriov && be_physfn(adapter))
4102 res->max_rss_qs = (adapter->be3_native) ?
4103 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4104 res->max_rx_qs = res->max_rss_qs + 1;
4105
e3dc867c 4106 if (be_physfn(adapter))
d3518e21 4107 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4108 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4109 else
4110 res->max_evt_qs = 1;
92bf14ab
SP
4111
4112 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4113 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4114 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4115 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4116}
4117
30128031
SP
4118static void be_setup_init(struct be_adapter *adapter)
4119{
4120 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4121 adapter->phy.link_speed = -1;
30128031
SP
4122 adapter->if_handle = -1;
4123 adapter->be3_native = false;
f66b7cfd 4124 adapter->if_flags = 0;
51d1f98a 4125 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4126 if (be_physfn(adapter))
4127 adapter->cmd_privileges = MAX_PRIVILEGES;
4128 else
4129 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4130}
4131
de2b1e03
SK
4132/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4133 * However, this HW limitation is not exposed to the host via any SLI cmd.
4134 * As a result, in the case of SRIOV and in particular multi-partition configs
4135 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4136 * for distribution between the VFs. This self-imposed limit will determine the
4137 * number of VFs for which RSS can be enabled.
4138 */
4139void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4140{
4141 struct be_port_resources port_res = {0};
4142 u8 rss_tables_on_port;
4143 u16 max_vfs = be_max_vfs(adapter);
4144
4145 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4146 RESOURCE_LIMITS, 0);
4147
4148 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4149
4150 /* Each PF Pool's RSS Tables limit =
4151 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4152 */
4153 adapter->pool_res.max_rss_tables =
4154 max_vfs * rss_tables_on_port / port_res.max_vfs;
4155}
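/* Editor's worked example with assumed values: MAX_PORT_RSS_TABLES = 15
 * and port_res.nic_pfs = 3 leave rss_tables_on_port = 12; a PF whose
 * pool allows max_vfs = 32 out of a port-wide port_res.max_vfs = 64 then
 * gets 32 * 12 / 64 = 6 RSS tables, i.e. RSS for at most 6 of its VFs.
 */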
4156
bec84e6b
VV
4157static int be_get_sriov_config(struct be_adapter *adapter)
4158{
bec84e6b 4159 struct be_resources res = {0};
d3d18312 4160 int max_vfs, old_vfs;
bec84e6b 4161
de2b1e03
SK
4162 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4163 RESOURCE_LIMITS, 0);
d3d18312 4164
ace40aff 4165 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4166 if (BE3_chip(adapter) && !res.max_vfs) {
4167 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4168 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4169 }
4170
d3d18312 4171 adapter->pool_res = res;
bec84e6b 4172
ace40aff
VV
4173 /* If during previous unload of the driver, the VFs were not disabled,
4174 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4175 * Instead use the TotalVFs value stored in the pci-dev struct.
4176 */
bec84e6b
VV
4177 old_vfs = pci_num_vf(adapter->pdev);
4178 if (old_vfs) {
ace40aff
VV
4179 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4180 old_vfs);
4181
4182 adapter->pool_res.max_vfs =
4183 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4184 adapter->num_vfs = old_vfs;
bec84e6b
VV
4185 }
4186
de2b1e03
SK
4187 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4188 be_calculate_pf_pool_rss_tables(adapter);
4189 dev_info(&adapter->pdev->dev,
4190 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4191 be_max_pf_pool_rss_tables(adapter));
4192 }
bec84e6b
VV
4193 return 0;
4194}
4195
ace40aff
VV
4196static void be_alloc_sriov_res(struct be_adapter *adapter)
4197{
4198 int old_vfs = pci_num_vf(adapter->pdev);
b9263cbf 4199 struct be_resources vft_res = {0};
ace40aff
VV
4200 int status;
4201
4202 be_get_sriov_config(adapter);
4203
4204 if (!old_vfs)
4205 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4206
4207 /* When the HW is in SRIOV capable configuration, the PF-pool
4208 * resources are given to PF during driver load, if there are no
4209 * old VFs. This facility is not available in BE3 FW.
4210 * Also, this is done by FW in Lancer chip.
4211 */
4212 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
b9263cbf 4213 be_calculate_vf_res(adapter, 0, &vft_res);
ace40aff 4214 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
b9263cbf 4215 &vft_res);
ace40aff
VV
4216 if (status)
4217 dev_err(&adapter->pdev->dev,
4218 "Failed to optimize SRIOV resources\n");
4219 }
4220}
4221
92bf14ab 4222static int be_get_resources(struct be_adapter *adapter)
abb93951 4223{
92bf14ab
SP
4224 struct device *dev = &adapter->pdev->dev;
4225 struct be_resources res = {0};
4226 int status;
abb93951 4227
92bf14ab
SP
4228 /* For Lancer, SH etc., read per-function resource limits from FW.
4229 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4230 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
4231 */
ce7faf0a
SP
4232 if (BEx_chip(adapter)) {
4233 BEx_get_resources(adapter, &res);
4234 } else {
92bf14ab
SP
4235 status = be_cmd_get_func_config(adapter, &res);
4236 if (status)
4237 return status;
abb93951 4238
71bb8bd0
VV
4239 /* If a default RXQ must be created, we'll use up one RSSQ */
4240 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4241 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4242 res.max_rss_qs -= 1;
abb93951 4243 }
4c876616 4244
ce7faf0a
SP
4245 /* If RoCE is supported, stash away half the EQs for RoCE */
4246 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4247 res.max_evt_qs / 2 : res.max_evt_qs;
4248 adapter->res = res;
4249
71bb8bd0
VV
4250 /* If FW supports RSS default queue, then skip creating non-RSS
4251 * queue for non-IP traffic.
4252 */
4253 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4254 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4255
acbafeb1
SP
4256 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4257 be_max_txqs(adapter), be_max_rxqs(adapter),
ce7faf0a 4258 be_max_rss(adapter), be_max_nic_eqs(adapter),
acbafeb1
SP
4259 be_max_vfs(adapter));
4260 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4261 be_max_uc(adapter), be_max_mc(adapter),
4262 be_max_vlans(adapter));
4263
e261768e
SP
4264 /* Ensure RX and TX queues are created in pairs at init time */
4265 adapter->cfg_num_rx_irqs =
4266 min_t(u16, netif_get_num_default_rss_queues(),
4267 be_max_qp_irqs(adapter));
4268 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
92bf14ab 4269 return 0;
abb93951
PR
4270}
4271
39f1d94d
SP
4272static int be_get_config(struct be_adapter *adapter)
4273{
6b085ba9 4274 int status, level;
542963b7 4275 u16 profile_id;
6b085ba9 4276
980df249
SR
4277 status = be_cmd_get_cntl_attributes(adapter);
4278 if (status)
4279 return status;
4280
e97e3cda 4281 status = be_cmd_query_fw_cfg(adapter);
abb93951 4282 if (status)
92bf14ab 4283 return status;
abb93951 4284
fd7ff6f0
VD
4285 if (!lancer_chip(adapter) && be_physfn(adapter))
4286 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4287
6b085ba9
SP
4288 if (BEx_chip(adapter)) {
4289 level = be_cmd_get_fw_log_level(adapter);
4290 adapter->msg_enable =
4291 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4292 }
4293
4294 be_cmd_get_acpi_wol_cap(adapter);
45f13df7
SB
4295 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4296 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
6b085ba9 4297
21252377
VV
4298 be_cmd_query_port_name(adapter);
4299
4300 if (be_physfn(adapter)) {
542963b7
VV
4301 status = be_cmd_get_active_profile(adapter, &profile_id);
4302 if (!status)
4303 dev_info(&adapter->pdev->dev,
4304 "Using profile 0x%x\n", profile_id);
962bcb75 4305 }
bec84e6b 4306
92bf14ab 4307 return 0;
39f1d94d
SP
4308}
4309
95046b92
SP
4310static int be_mac_setup(struct be_adapter *adapter)
4311{
4312 u8 mac[ETH_ALEN];
4313 int status;
4314
4315 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4316 status = be_cmd_get_perm_mac(adapter, mac);
4317 if (status)
4318 return status;
4319
4320 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4321 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4322 }
4323
95046b92
SP
4324 return 0;
4325}
4326
68d7bdcb
SP
4327static void be_schedule_worker(struct be_adapter *adapter)
4328{
4329 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4330 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4331}
4332
972f37b4 4333static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c
SP
4334{
4335 schedule_delayed_work(&adapter->be_err_detection_work,
972f37b4 4336 msecs_to_jiffies(delay));
eb7dd46c
SP
4337 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4338}
4339
7707133c 4340static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4341{
68d7bdcb 4342 struct net_device *netdev = adapter->netdev;
10ef9ab4 4343 int status;
ba343c77 4344
7707133c 4345 status = be_evt_queues_create(adapter);
abb93951
PR
4346 if (status)
4347 goto err;
73d540f2 4348
7707133c 4349 status = be_tx_qs_create(adapter);
c2bba3df
SK
4350 if (status)
4351 goto err;
10ef9ab4 4352
7707133c 4353 status = be_rx_cqs_create(adapter);
10ef9ab4 4354 if (status)
a54769f5 4355 goto err;
6b7c5b94 4356
7707133c 4357 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4358 if (status)
4359 goto err;
4360
68d7bdcb
SP
4361 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4362 if (status)
4363 goto err;
4364
4365 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4366 if (status)
4367 goto err;
4368
7707133c
SP
4369 return 0;
4370err:
4371 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4372 return status;
4373}
4374
62219066
AK
4375static int be_if_create(struct be_adapter *adapter)
4376{
4377 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4378 u32 cap_flags = be_if_cap_flags(adapter);
4379 int status;
4380
e261768e 4381 if (adapter->cfg_num_rx_irqs == 1)
62219066
AK
4382 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4383
4384 en_flags &= cap_flags;
4385 /* will enable all the needed filter flags in be_open() */
4386 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4387 &adapter->if_handle, 0);
4388
4389 return status;
4390}
4391
68d7bdcb
SP
4392int be_update_queues(struct be_adapter *adapter)
4393{
4394 struct net_device *netdev = adapter->netdev;
4395 int status;
4396
4397 if (netif_running(netdev))
4398 be_close(netdev);
4399
4400 be_cancel_worker(adapter);
4401
4402 /* If any vectors have been shared with RoCE we cannot re-program
4403 * the MSIx table.
4404 */
4405 if (!adapter->num_msix_roce_vec)
4406 be_msix_disable(adapter);
4407
4408 be_clear_queues(adapter);
62219066
AK
4409 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4410 if (status)
4411 return status;
68d7bdcb
SP
4412
4413 if (!msix_enabled(adapter)) {
4414 status = be_msix_enable(adapter);
4415 if (status)
4416 return status;
4417 }
4418
62219066
AK
4419 status = be_if_create(adapter);
4420 if (status)
4421 return status;
4422
68d7bdcb
SP
4423 status = be_setup_queues(adapter);
4424 if (status)
4425 return status;
4426
4427 be_schedule_worker(adapter);
4428
4429 if (netif_running(netdev))
4430 status = be_open(netdev);
4431
4432 return status;
4433}
4434
f7062ee5
SP
4435static inline int fw_major_num(const char *fw_ver)
4436{
4437 int fw_major = 0, i;
4438
4439 i = sscanf(fw_ver, "%d.", &fw_major);
4440 if (i != 1)
4441 return 0;
4442
4443 return fw_major;
4444}
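/* e.g. fw_major_num("4.9.311.0") returns 4, while an unparsable string
 * such as "N/A" returns 0 (sscanf() assigns nothing).
 */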
4445
f962f840
SP
4446/* If any VFs are already enabled, don't FLR the PF */
4447static bool be_reset_required(struct be_adapter *adapter)
4448{
4449 return pci_num_vf(adapter->pdev) ? false : true;
4450}
4451
4452/* Wait for the FW to be ready and perform the required initialization */
4453static int be_func_init(struct be_adapter *adapter)
4454{
4455 int status;
4456
4457 status = be_fw_wait_ready(adapter);
4458 if (status)
4459 return status;
4460
4461 if (be_reset_required(adapter)) {
4462 status = be_cmd_reset_function(adapter);
4463 if (status)
4464 return status;
4465
4466 /* Wait for interrupts to quiesce after an FLR */
4467 msleep(100);
4468
4469 /* We can clear all errors when function reset succeeds */
954f6825 4470 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4471 }
4472
4473 /* Tell FW we're ready to fire cmds */
4474 status = be_cmd_fw_init(adapter);
4475 if (status)
4476 return status;
4477
4478 /* Allow interrupts for other ULPs running on NIC function */
4479 be_intr_set(adapter, true);
4480
4481 return 0;
4482}
4483
7707133c
SP
4484static int be_setup(struct be_adapter *adapter)
4485{
4486 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4487 int status;
4488
f962f840
SP
4489 status = be_func_init(adapter);
4490 if (status)
4491 return status;
4492
7707133c
SP
4493 be_setup_init(adapter);
4494
4495 if (!lancer_chip(adapter))
4496 be_cmd_req_native_mode(adapter);
4497
980df249
SR
4498 /* invoke this cmd first to get pf_num and vf_num which are needed
4499 * for issuing profile related cmds
4500 */
4501 if (!BEx_chip(adapter)) {
4502 status = be_cmd_get_func_config(adapter, NULL);
4503 if (status)
4504 return status;
4505 }
72ef3a88 4506
de2b1e03
SK
4507 status = be_get_config(adapter);
4508 if (status)
4509 goto err;
4510
ace40aff
VV
4511 if (!BE2_chip(adapter) && be_physfn(adapter))
4512 be_alloc_sriov_res(adapter);
4513
de2b1e03 4514 status = be_get_resources(adapter);
10ef9ab4 4515 if (status)
a54769f5 4516 goto err;
6b7c5b94 4517
de2b1e03
SK
4518 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4519 sizeof(*adapter->pmac_id), GFP_KERNEL);
4520 if (!adapter->pmac_id)
4521 return -ENOMEM;
4522
7707133c 4523 status = be_msix_enable(adapter);
10ef9ab4 4524 if (status)
a54769f5 4525 goto err;
6b7c5b94 4526
bcc84140 4527 /* will enable all the needed filter flags in be_open() */
62219066 4528 status = be_if_create(adapter);
7707133c 4529 if (status)
a54769f5 4530 goto err;
6b7c5b94 4531
68d7bdcb
SP
4532 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4533 rtnl_lock();
7707133c 4534 status = be_setup_queues(adapter);
68d7bdcb 4535 rtnl_unlock();
95046b92 4536 if (status)
1578e777
PR
4537 goto err;
4538
7707133c 4539 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4540
4541 status = be_mac_setup(adapter);
10ef9ab4
SP
4542 if (status)
4543 goto err;
4544
e97e3cda 4545 be_cmd_get_fw_ver(adapter);
acbafeb1 4546 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4547
e9e2a904 4548 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4549 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4550 adapter->fw_ver);
4551 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4552 }
4553
00d594c3
KA
4554 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4555 adapter->rx_fc);
4556 if (status)
4557 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4558 &adapter->rx_fc);
590c391d 4559
00d594c3
KA
4560 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4561 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4562
bdce2ad7
SR
4563 if (be_physfn(adapter))
4564 be_cmd_set_logical_link_config(adapter,
4565 IFLA_VF_LINK_STATE_AUTO, 0);
4566
bec84e6b
VV
4567 if (adapter->num_vfs)
4568 be_vf_setup(adapter);
f9449ab7 4569
f25b119c
PR
4570 status = be_cmd_get_phy_info(adapter);
4571 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4572 adapter->phy.fc_autoneg = 1;
4573
68d7bdcb 4574 be_schedule_worker(adapter);
e1ad8e33 4575 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4576 return 0;
a54769f5
SP
4577err:
4578 be_clear(adapter);
4579 return status;
4580}
6b7c5b94 4581
66268739
IV
4582#ifdef CONFIG_NET_POLL_CONTROLLER
4583static void be_netpoll(struct net_device *netdev)
4584{
4585 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4586 struct be_eq_obj *eqo;
66268739
IV
4587 int i;
4588
e49cc34f 4589 for_all_evt_queues(adapter, eqo, i) {
20947770 4590 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4591 napi_schedule(&eqo->napi);
4592 }
66268739
IV
4593}
4594#endif
4595
485bf569
SN
4596int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4597{
4598 const struct firmware *fw;
4599 int status;
4600
4601 if (!netif_running(adapter->netdev)) {
4602 dev_err(&adapter->pdev->dev,
4603 "Firmware load not allowed (interface is down)\n");
940a3fcd 4604 return -ENETDOWN;
485bf569
SN
4605 }
4606
4607 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4608 if (status)
4609 goto fw_exit;
4610
4611 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4612
4613 if (lancer_chip(adapter))
4614 status = lancer_fw_download(adapter, fw);
4615 else
4616 status = be_fw_download(adapter, fw);
4617
eeb65ced 4618 if (!status)
e97e3cda 4619 be_cmd_get_fw_ver(adapter);
eeb65ced 4620
84517482
AK
4621fw_exit:
4622 release_firmware(fw);
4623 return status;
4624}
4625
add511b3
RP
4626static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4627 u16 flags)
a77dcb8c
AK
4628{
4629 struct be_adapter *adapter = netdev_priv(dev);
4630 struct nlattr *attr, *br_spec;
4631 int rem;
4632 int status = 0;
4633 u16 mode = 0;
4634
4635 if (!sriov_enabled(adapter))
4636 return -EOPNOTSUPP;
4637
4638 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4639 if (!br_spec)
4640 return -EINVAL;
a77dcb8c
AK
4641
4642 nla_for_each_nested(attr, br_spec, rem) {
4643 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4644 continue;
4645
b7c1a314
TG
4646 if (nla_len(attr) < sizeof(mode))
4647 return -EINVAL;
4648
a77dcb8c 4649 mode = nla_get_u16(attr);
ac0f5fba
SR
4650 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4651 return -EOPNOTSUPP;
4652
a77dcb8c
AK
4653 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4654 return -EINVAL;
4655
4656 status = be_cmd_set_hsw_config(adapter, 0, 0,
4657 adapter->if_handle,
4658 mode == BRIDGE_MODE_VEPA ?
4659 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4660 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4661 if (status)
4662 goto err;
4663
4664 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4665 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4666
4667 return status;
4668 }
4669err:
4670 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4671 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4672
4673 return status;
4674}
4675
4676static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4677 struct net_device *dev, u32 filter_mask,
4678 int nlflags)
a77dcb8c
AK
4679{
4680 struct be_adapter *adapter = netdev_priv(dev);
4681 int status = 0;
4682 u8 hsw_mode;
4683
a77dcb8c
AK
4684 /* BE and Lancer chips support VEB mode only */
4685 if (BEx_chip(adapter) || lancer_chip(adapter)) {
8431706b
IV
4686 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4687 if (!pci_sriov_get_totalvfs(adapter->pdev))
4688 return 0;
a77dcb8c
AK
4689 hsw_mode = PORT_FWD_TYPE_VEB;
4690 } else {
4691 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4692 adapter->if_handle, &hsw_mode,
4693 NULL);
a77dcb8c
AK
4694 if (status)
4695 return 0;
ff9ed19d
KP
4696
4697 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4698 return 0;
a77dcb8c
AK
4699 }
4700
4701 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4702 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4703 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4704 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4705}
4706
630f4b70
SB
4707/* VxLAN offload Notes:
4708 *
4709 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4710 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4711 * is expected to work across all types of IP tunnels once exported. Skyhawk
4712 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4713 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4714 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4715 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4716 *
4717 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4718 * adds more than one port, disable offloads and don't re-enable them again
4719 * until after all the tunnels are removed.
4720 */
bde6b7cd
AD
4721static void be_add_vxlan_port(struct net_device *netdev,
4722 struct udp_tunnel_info *ti)
c9c47142
SP
4723{
4724 struct be_adapter *adapter = netdev_priv(netdev);
4725 struct device *dev = &adapter->pdev->dev;
bde6b7cd 4726 __be16 port = ti->port;
c9c47142
SP
4727 int status;
4728
bde6b7cd
AD
4729 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4730 return;
4731
af19e686 4732 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4733 return;
4734
1e5b311a
JB
4735 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4736 adapter->vxlan_port_aliases++;
4737 return;
4738 }
4739
c9c47142 4740 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4741 dev_info(dev,
4742 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4743 dev_info(dev, "Disabling VxLAN offloads\n");
4744 adapter->vxlan_port_count++;
4745 goto err;
c9c47142
SP
4746 }
4747
630f4b70
SB
4748 if (adapter->vxlan_port_count++ >= 1)
4749 return;
4750
c9c47142
SP
4751 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4752 OP_CONVERT_NORMAL_TO_TUNNEL);
4753 if (status) {
4754 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4755 goto err;
4756 }
4757
4758 status = be_cmd_set_vxlan_port(adapter, port);
4759 if (status) {
4760 dev_warn(dev, "Failed to add VxLAN port\n");
4761 goto err;
4762 }
4763 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4764 adapter->vxlan_port = port;
4765
630f4b70
SB
4766 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4767 NETIF_F_TSO | NETIF_F_TSO6 |
4768 NETIF_F_GSO_UDP_TUNNEL;
4769 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4770 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4771
c9c47142
SP
4772 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4773 be16_to_cpu(port));
4774 return;
4775err:
4776 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4777}
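/* Editor's note on the bookkeeping above (assumed sequence of events):
 * adding port 4789 enables offloads; adding 4789 again only bumps
 * vxlan_port_aliases; adding a *different* port while offloads are on
 * disables them, and they stay off until every added port is deleted
 * and vxlan_port_count returns to zero.
 */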
4778
bde6b7cd
AD
4779static void be_del_vxlan_port(struct net_device *netdev,
4780 struct udp_tunnel_info *ti)
c9c47142
SP
4781{
4782 struct be_adapter *adapter = netdev_priv(netdev);
bde6b7cd
AD
4783 __be16 port = ti->port;
4784
4785 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4786 return;
c9c47142 4787
af19e686 4788 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4789 return;
4790
4791 if (adapter->vxlan_port != port)
630f4b70 4792 goto done;
c9c47142 4793
1e5b311a
JB
4794 if (adapter->vxlan_port_aliases) {
4795 adapter->vxlan_port_aliases--;
4796 return;
4797 }
4798
c9c47142
SP
4799 be_disable_vxlan_offloads(adapter);
4800
4801 dev_info(&adapter->pdev->dev,
4802 "Disabled VxLAN offloads for UDP port %d\n",
4803 be16_to_cpu(port));
630f4b70
SB
4804done:
4805 adapter->vxlan_port_count--;
c9c47142 4806}
725d548f 4807
5f35227e
JG
4808static netdev_features_t be_features_check(struct sk_buff *skb,
4809 struct net_device *dev,
4810 netdev_features_t features)
725d548f 4811{
16dde0d6
SB
4812 struct be_adapter *adapter = netdev_priv(dev);
4813 u8 l4_hdr = 0;
4814
4815 /* The code below restricts offload features for some tunneled packets.
4816 * Offload features for normal (non tunnel) packets are unchanged.
4817 */
4818 if (!skb->encapsulation ||
4819 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4820 return features;
4821
4822 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4823 * should disable tunnel offload features if it's not a VxLAN packet,
4824 * as tunnel offloads have been enabled only for VxLAN. This is done to
4825 * allow other tunneled traffic like GRE work fine while VxLAN
4826 * offloads are configured in Skyhawk-R.
4827 */
4828 switch (vlan_get_protocol(skb)) {
4829 case htons(ETH_P_IP):
4830 l4_hdr = ip_hdr(skb)->protocol;
4831 break;
4832 case htons(ETH_P_IPV6):
4833 l4_hdr = ipv6_hdr(skb)->nexthdr;
4834 break;
4835 default:
4836 return features;
4837 }
4838
4839 if (l4_hdr != IPPROTO_UDP ||
4840 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4841 skb->inner_protocol != htons(ETH_P_TEB) ||
4842 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4843 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
a188222b 4844 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
16dde0d6
SB
4845
4846 return features;
725d548f 4847}
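/* Editor's note: the length test above accepts only the canonical VxLAN
 * encapsulation,
 *
 *   outer IP | outer UDP | vxlanhdr | inner Ethernet (ETH_P_TEB) | ...
 *
 * i.e. inner MAC header minus transport header must equal
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr); anything else keeps
 * only the non-checksum, non-GSO features.
 */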
c9c47142 4848
a155a5db
SB
4849static int be_get_phys_port_id(struct net_device *dev,
4850 struct netdev_phys_item_id *ppid)
4851{
4852 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4853 struct be_adapter *adapter = netdev_priv(dev);
4854 u8 *id;
4855
4856 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4857 return -ENOSPC;
4858
4859 ppid->id[0] = adapter->hba_port_num + 1;
4860 id = &ppid->id[1];
4861 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4862 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4863 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4864
4865 ppid->id_len = id_len;
4866
4867 return 0;
4868}
4869
e5686ad8 4870static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4871 .ndo_open = be_open,
4872 .ndo_stop = be_close,
4873 .ndo_start_xmit = be_xmit,
a54769f5 4874 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4875 .ndo_set_mac_address = be_mac_addr_set,
4876 .ndo_change_mtu = be_change_mtu,
ab1594e9 4877 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4878 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4879 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4880 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4881 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4882 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4883 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4884 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4885 .ndo_set_vf_link_state = be_set_vf_link_state,
e7bcbd7b 4886 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
66268739
IV
4887#ifdef CONFIG_NET_POLL_CONTROLLER
4888 .ndo_poll_controller = be_netpoll,
4889#endif
a77dcb8c
AK
4890 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4891 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4892#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4893 .ndo_busy_poll = be_busy_poll,
6384a4d0 4894#endif
bde6b7cd
AD
4895 .ndo_udp_tunnel_add = be_add_vxlan_port,
4896 .ndo_udp_tunnel_del = be_del_vxlan_port,
5f35227e 4897 .ndo_features_check = be_features_check,
a155a5db 4898 .ndo_get_phys_port_id = be_get_phys_port_id,
6b7c5b94
SP
4899};
4900
4901static void be_netdev_init(struct net_device *netdev)
4902{
4903 struct be_adapter *adapter = netdev_priv(netdev);
4904
6332c8d3 4905 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4906 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4907 NETIF_F_HW_VLAN_CTAG_TX;
62219066 4908 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
8b8ddc68 4909 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4910
4911 netdev->features |= netdev->hw_features |
f646968f 4912 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4913
eb8a50d9 4914 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4915 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4916
fbc13f01
AK
4917 netdev->priv_flags |= IFF_UNICAST_FLT;
4918
6b7c5b94
SP
4919 netdev->flags |= IFF_MULTICAST;
4920
127bfce5 4921 netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
c190e3c8 4922
10ef9ab4 4923 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4924
7ad24ea4 4925 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4926}
4927
87ac1a52
KA
4928static void be_cleanup(struct be_adapter *adapter)
4929{
4930 struct net_device *netdev = adapter->netdev;
4931
4932 rtnl_lock();
4933 netif_device_detach(netdev);
4934 if (netif_running(netdev))
4935 be_close(netdev);
4936 rtnl_unlock();
4937
4938 be_clear(adapter);
4939}
4940
484d76fd 4941static int be_resume(struct be_adapter *adapter)
78fad34e 4942{
d0e1b319 4943 struct net_device *netdev = adapter->netdev;
78fad34e
SP
4944 int status;
4945
78fad34e
SP
4946 status = be_setup(adapter);
4947 if (status)
484d76fd 4948 return status;
78fad34e 4949
08d9910c
HFS
4950 rtnl_lock();
4951 if (netif_running(netdev))
d0e1b319 4952 status = be_open(netdev);
08d9910c
HFS
4953 rtnl_unlock();
4954
4955 if (status)
4956 return status;
78fad34e 4957
d0e1b319
KA
4958 netif_device_attach(netdev);
4959
484d76fd
KA
4960 return 0;
4961}
4962
4963static int be_err_recover(struct be_adapter *adapter)
4964{
484d76fd
KA
4965 int status;
4966
1babbad4
PR
4967 /* Error recovery is supported only on Lancer as of now */
4968 if (!lancer_chip(adapter))
4969 return -EIO;
4970
4971 /* Wait for adapter to reach quiescent state before
4972 * destroying queues
4973 */
4974 status = be_fw_wait_ready(adapter);
4975 if (status)
4976 goto err;
4977
4978 be_cleanup(adapter);
4979
484d76fd
KA
4980 status = be_resume(adapter);
4981 if (status)
4982 goto err;
4983
78fad34e
SP
4984 return 0;
4985err:
78fad34e
SP
4986 return status;
4987}
4988
eb7dd46c 4989static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
4990{
4991 struct be_adapter *adapter =
eb7dd46c
SP
4992 container_of(work, struct be_adapter,
4993 be_err_detection_work.work);
1babbad4
PR
4994 struct device *dev = &adapter->pdev->dev;
4995 int recovery_status;
972f37b4 4996 int delay = ERR_DETECTION_DELAY;
78fad34e
SP
4997
4998 be_detect_error(adapter);
4999
1babbad4
PR
5000 if (be_check_error(adapter, BE_ERROR_HW))
5001 recovery_status = be_err_recover(adapter);
5002 else
5003 goto reschedule_task;
5004
5005 if (!recovery_status) {
972f37b4 5006 adapter->recovery_retries = 0;
1babbad4
PR
5007 dev_info(dev, "Adapter recovery successful\n");
5008 goto reschedule_task;
5009 } else if (be_virtfn(adapter)) {
5010 /* For VFs, check if PF have allocated resources
5011 * every second.
5012 */
5013 dev_err(dev, "Re-trying adapter recovery\n");
5014 goto reschedule_task;
972f37b4
PR
5015 } else if (adapter->recovery_retries++ <
5016 MAX_ERR_RECOVERY_RETRY_COUNT) {
5017 /* In case of another error during recovery, it takes 30 sec
5018 * for adapter to come out of error. Retry error recovery after
5019 * this time interval.
5020 */
5021 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5022 delay = ERR_RECOVERY_RETRY_DELAY;
5023 goto reschedule_task;
1babbad4
PR
5024 } else {
5025 dev_err(dev, "Adapter recovery failed\n");
78fad34e
SP
5026 }
5027
1babbad4
PR
5028 return;
5029reschedule_task:
972f37b4 5030 be_schedule_err_detection(adapter, delay);
78fad34e
SP
5031}
5032
5033static void be_log_sfp_info(struct be_adapter *adapter)
5034{
5035 int status;
5036
5037 status = be_cmd_query_sfp_info(adapter);
5038 if (!status) {
5039 dev_err(&adapter->pdev->dev,
51d1f98a
AK
5040 "Port %c: %s Vendor: %s part no: %s",
5041 adapter->port_name,
5042 be_misconfig_evt_port_state[adapter->phy_state],
5043 adapter->phy.vendor_name,
78fad34e
SP
5044 adapter->phy.vendor_pn);
5045 }
51d1f98a 5046 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
78fad34e
SP
5047}
5048
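/* Periodic housekeeping, re-armed every second: fires the async stats
 * command, samples the die temperature, replenishes RX queues starved
 * by allocation failures and updates EQ delays on pre-Skyhawk chips.
 */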
5049static void be_worker(struct work_struct *work)
5050{
5051 struct be_adapter *adapter =
5052 container_of(work, struct be_adapter, work.work);
5053 struct be_rx_obj *rxo;
5054 int i;
5055
 5056 /* When interrupts are not yet enabled, just reap any pending
 5057 * MCC completions.
 5058 */
5059 if (!netif_running(adapter->netdev)) {
5060 local_bh_disable();
5061 be_process_mcc(adapter);
5062 local_bh_enable();
5063 goto reschedule;
5064 }
5065
5066 if (!adapter->stats_cmd_sent) {
5067 if (lancer_chip(adapter))
5068 lancer_cmd_get_pport_stats(adapter,
5069 &adapter->stats_cmd);
5070 else
5071 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5072 }
5073
5074 if (be_physfn(adapter) &&
5075 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5076 be_cmd_get_die_temperature(adapter);
5077
5078 for_all_rx_queues(adapter, rxo, i) {
5079 /* Replenish RX-queues starved due to memory
5080 * allocation failures.
5081 */
5082 if (rxo->rx_post_starved)
5083 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5084 }
5085
20947770
PR
5086 /* EQ-delay update for Skyhawk is done while notifying EQ */
5087 if (!skyhawk_chip(adapter))
5088 be_eqd_update(adapter, false);
78fad34e 5089
51d1f98a 5090 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
78fad34e
SP
5091 be_log_sfp_info(adapter);
5092
5093reschedule:
5094 adapter->work_counter++;
5095 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5096}
5097
6b7c5b94
SP
5098static void be_unmap_pci_bars(struct be_adapter *adapter)
5099{
c5b3ad4c
SP
5100 if (adapter->csr)
5101 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 5102 if (adapter->db)
ce66f781 5103 pci_iounmap(adapter->pdev, adapter->db);
a69bf3c5
DM
5104 if (adapter->pcicfg && adapter->pcicfg_mapped)
5105 pci_iounmap(adapter->pdev, adapter->pcicfg);
045508a8
PP
5106}
5107
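/* Doorbell BAR: Lancer and VFs expose doorbells in BAR 0, while the
 * other chips use BAR 4 on the PF.
 */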
ce66f781
SP
5108static int db_bar(struct be_adapter *adapter)
5109{
18c57c74 5110 if (lancer_chip(adapter) || be_virtfn(adapter))
ce66f781
SP
5111 return 0;
5112 else
5113 return 4;
5114}
5115
5116static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 5117{
dbf0f2a7 5118 if (skyhawk_chip(adapter)) {
ce66f781
SP
5119 adapter->roce_db.size = 4096;
5120 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5121 db_bar(adapter));
5122 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5123 db_bar(adapter));
5124 }
045508a8 5125 return 0;
6b7c5b94
SP
5126}
5127
5128static int be_map_pci_bars(struct be_adapter *adapter)
5129{
0fa74a4b 5130 struct pci_dev *pdev = adapter->pdev;
6b7c5b94 5131 u8 __iomem *addr;
78fad34e
SP
5132 u32 sli_intf;
5133
5134 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5135 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5136 SLI_INTF_FAMILY_SHIFT;
5137 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38 5138
c5b3ad4c 5139 if (BEx_chip(adapter) && be_physfn(adapter)) {
0fa74a4b 5140 adapter->csr = pci_iomap(pdev, 2, 0);
ddf1169f 5141 if (!adapter->csr)
c5b3ad4c
SP
5142 return -ENOMEM;
5143 }
5144
25848c90 5145 addr = pci_iomap(pdev, db_bar(adapter), 0);
ddf1169f 5146 if (!addr)
6b7c5b94 5147 goto pci_map_err;
ba343c77 5148 adapter->db = addr;
ce66f781 5149
25848c90
SR
5150 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5151 if (be_physfn(adapter)) {
5152 /* PCICFG is the 2nd BAR in BE2 */
5153 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5154 if (!addr)
5155 goto pci_map_err;
5156 adapter->pcicfg = addr;
a69bf3c5 5157 adapter->pcicfg_mapped = true;
25848c90
SR
5158 } else {
5159 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
a69bf3c5 5160 adapter->pcicfg_mapped = false;
25848c90
SR
5161 }
5162 }
5163
ce66f781 5164 be_roce_map_pci_bars(adapter);
6b7c5b94 5165 return 0;
ce66f781 5166
6b7c5b94 5167pci_map_err:
25848c90 5168 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
5169 be_unmap_pci_bars(adapter);
5170 return -ENOMEM;
5171}
5172
78fad34e 5173static void be_drv_cleanup(struct be_adapter *adapter)
6b7c5b94 5174{
8788fdc2 5175 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
78fad34e 5176 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
5177
5178 if (mem->va)
78fad34e 5179 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
e7b909a6 5180
5b8821b7 5181 mem = &adapter->rx_filter;
e7b909a6 5182 if (mem->va)
78fad34e
SP
5183 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5184
5185 mem = &adapter->stats_cmd;
5186 if (mem->va)
5187 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
6b7c5b94
SP
5188}
5189
78fad34e
SP
5190/* Allocate and initialize various fields in be_adapter struct */
5191static int be_drv_init(struct be_adapter *adapter)
6b7c5b94 5192{
8788fdc2
SP
5193 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5194 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 5195 struct be_dma_mem *rx_filter = &adapter->rx_filter;
78fad34e
SP
5196 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5197 struct device *dev = &adapter->pdev->dev;
5198 int status = 0;
6b7c5b94
SP
5199
5200 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
e51000db
SB
5201 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5202 &mbox_mem_alloc->dma,
5203 GFP_KERNEL);
78fad34e
SP
5204 if (!mbox_mem_alloc->va)
5205 return -ENOMEM;
5206
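/* The FW expects the mailbox to be 16-byte aligned; the allocation
 * above is padded by 16 bytes so that both the VA and the DMA address
 * can be rounded up to the next 16-byte boundary below.
 */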
6b7c5b94
SP
5207 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5208 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5209 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
e7b909a6 5210
5b8821b7 5211 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
78fad34e
SP
5212 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5213 &rx_filter->dma, GFP_KERNEL);
ddf1169f 5214 if (!rx_filter->va) {
e7b909a6
SP
5215 status = -ENOMEM;
5216 goto free_mbox;
5217 }
1f9061d2 5218
78fad34e
SP
5219 if (lancer_chip(adapter))
5220 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5221 else if (BE2_chip(adapter))
5222 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5223 else if (BE3_chip(adapter))
5224 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5225 else
5226 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5227 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5228 &stats_cmd->dma, GFP_KERNEL);
5229 if (!stats_cmd->va) {
5230 status = -ENOMEM;
5231 goto free_rx_filter;
5232 }
5233
2984961c 5234 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
5235 spin_lock_init(&adapter->mcc_lock);
5236 spin_lock_init(&adapter->mcc_cq_lock);
5eeff635 5237 init_completion(&adapter->et_cmd_compl);
e7b909a6 5238
78fad34e 5239 pci_save_state(adapter->pdev);
6b7c5b94 5240
78fad34e 5241 INIT_DELAYED_WORK(&adapter->work, be_worker);
eb7dd46c
SP
5242 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5243 be_err_detection_task);
6b7c5b94 5244
78fad34e
SP
5245 adapter->rx_fc = true;
5246 adapter->tx_fc = true;
6b7c5b94 5247
78fad34e
SP
5248 /* Must be a power of 2 or else MODULO will BUG_ON */
5249 adapter->be_get_temp_freq = 64;
ca34fe38 5250
6b7c5b94 5251 return 0;
78fad34e
SP
5252
5253free_rx_filter:
5254 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5255free_mbox:
5256 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5257 mbox_mem_alloc->dma);
5258 return status;
6b7c5b94
SP
5259}
5260
3bc6b06c 5261static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
5262{
5263 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 5264
6b7c5b94
SP
5265 if (!adapter)
5266 return;
5267
045508a8 5268 be_roce_dev_remove(adapter);
8cef7a78 5269 be_intr_set(adapter, false);
045508a8 5270
eb7dd46c 5271 be_cancel_err_detection(adapter);
f67ef7ba 5272
6b7c5b94
SP
5273 unregister_netdev(adapter->netdev);
5274
5fb379ee
SP
5275 be_clear(adapter);
5276
bf99e50d
PR
 5277 /* Tell the FW we're done issuing cmds */
5278 be_cmd_fw_clean(adapter);
5279
78fad34e
SP
5280 be_unmap_pci_bars(adapter);
5281 be_drv_cleanup(adapter);
6b7c5b94 5282
d6b6d987
SP
5283 pci_disable_pcie_error_reporting(pdev);
5284
6b7c5b94
SP
5285 pci_release_regions(pdev);
5286 pci_disable_device(pdev);
5287
5288 free_netdev(adapter->netdev);
5289}
5290
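/* hwmon callback: reports the die temperature last sampled by
 * be_worker(), converted to the millidegree Celsius unit the hwmon
 * sysfs ABI expects.
 */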
9a03259c
AB
5291static ssize_t be_hwmon_show_temp(struct device *dev,
5292 struct device_attribute *dev_attr,
5293 char *buf)
29e9122b
VD
5294{
5295 struct be_adapter *adapter = dev_get_drvdata(dev);
5296
5297 /* Unit: millidegree Celsius */
5298 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5299 return -EIO;
5300 else
5301 return sprintf(buf, "%u\n",
5302 adapter->hwmon_info.be_on_die_temp * 1000);
5303}
5304
5305static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5306 be_hwmon_show_temp, NULL, 1);
5307
5308static struct attribute *be_hwmon_attrs[] = {
5309 &sensor_dev_attr_temp1_input.dev_attr.attr,
5310 NULL
5311};
5312
5313ATTRIBUTE_GROUPS(be_hwmon);
5314
d379142b
SP
5315static char *mc_name(struct be_adapter *adapter)
5316{
f93f160b
VV
5317 char *str = ""; /* default */
5318
5319 switch (adapter->mc_type) {
5320 case UMC:
5321 str = "UMC";
5322 break;
5323 case FLEX10:
5324 str = "FLEX10";
5325 break;
5326 case vNIC1:
5327 str = "vNIC-1";
5328 break;
5329 case nPAR:
5330 str = "nPAR";
5331 break;
5332 case UFP:
5333 str = "UFP";
5334 break;
5335 case vNIC2:
5336 str = "vNIC-2";
5337 break;
5338 default:
5339 str = "";
5340 }
5341
5342 return str;
d379142b
SP
5343}
5344
5345static inline char *func_name(struct be_adapter *adapter)
5346{
5347 return be_physfn(adapter) ? "PF" : "VF";
5348}
5349
f7062ee5
SP
5350static inline char *nic_name(struct pci_dev *pdev)
5351{
5352 switch (pdev->device) {
5353 case OC_DEVICE_ID1:
5354 return OC_NAME;
5355 case OC_DEVICE_ID2:
5356 return OC_NAME_BE;
5357 case OC_DEVICE_ID3:
5358 case OC_DEVICE_ID4:
5359 return OC_NAME_LANCER;
5360 case BE_DEVICE_ID2:
5361 return BE3_NAME;
5362 case OC_DEVICE_ID5:
5363 case OC_DEVICE_ID6:
5364 return OC_NAME_SH;
5365 default:
5366 return BE_NAME;
5367 }
5368}
5369
1dd06ae8 5370static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94 5371{
6b7c5b94
SP
5372 struct be_adapter *adapter;
5373 struct net_device *netdev;
21252377 5374 int status = 0;
6b7c5b94 5375
acbafeb1
SP
5376 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5377
6b7c5b94
SP
5378 status = pci_enable_device(pdev);
5379 if (status)
5380 goto do_none;
5381
5382 status = pci_request_regions(pdev, DRV_NAME);
5383 if (status)
5384 goto disable_dev;
5385 pci_set_master(pdev);
5386
7f640062 5387 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5388 if (!netdev) {
6b7c5b94
SP
5389 status = -ENOMEM;
5390 goto rel_reg;
5391 }
5392 adapter = netdev_priv(netdev);
5393 adapter->pdev = pdev;
5394 pci_set_drvdata(pdev, adapter);
5395 adapter->netdev = netdev;
2243e2e9 5396 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5397
4c15c243 5398 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
5399 if (!status) {
5400 netdev->features |= NETIF_F_HIGHDMA;
5401 } else {
4c15c243 5402 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
5403 if (status) {
5404 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5405 goto free_netdev;
5406 }
5407 }
5408
2f951a9a
KA
5409 status = pci_enable_pcie_error_reporting(pdev);
5410 if (!status)
5411 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5412
78fad34e 5413 status = be_map_pci_bars(adapter);
6b7c5b94 5414 if (status)
39f1d94d 5415 goto free_netdev;
6b7c5b94 5416
78fad34e
SP
5417 status = be_drv_init(adapter);
5418 if (status)
5419 goto unmap_bars;
5420
5fb379ee
SP
5421 status = be_setup(adapter);
5422 if (status)
78fad34e 5423 goto drv_cleanup;
2243e2e9 5424
3abcdeda 5425 be_netdev_init(netdev);
6b7c5b94
SP
5426 status = register_netdev(netdev);
5427 if (status != 0)
5fb379ee 5428 goto unsetup;
6b7c5b94 5429
045508a8
PP
5430 be_roce_dev_add(adapter);
5431
972f37b4 5432 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
b4e32a71 5433
29e9122b 5434 /* On Die temperature not supported for VF. */
9a03259c 5435 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
29e9122b
VD
5436 adapter->hwmon_info.hwmon_dev =
5437 devm_hwmon_device_register_with_groups(&pdev->dev,
5438 DRV_NAME,
5439 adapter,
5440 be_hwmon_groups);
5441 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5442 }
5443
d379142b 5444 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
21252377 5445 func_name(adapter), mc_name(adapter), adapter->port_name);
34b1ef04 5446
6b7c5b94
SP
5447 return 0;
5448
5fb379ee
SP
5449unsetup:
5450 be_clear(adapter);
78fad34e
SP
5451drv_cleanup:
5452 be_drv_cleanup(adapter);
5453unmap_bars:
5454 be_unmap_pci_bars(adapter);
f9449ab7 5455free_netdev:
fe6d2a38 5456 free_netdev(netdev);
6b7c5b94
SP
5457rel_reg:
5458 pci_release_regions(pdev);
5459disable_dev:
5460 pci_disable_device(pdev);
5461do_none:
c4ca2374 5462 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5463 return status;
5464}
5465
5466static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5467{
5468 struct be_adapter *adapter = pci_get_drvdata(pdev);
6b7c5b94 5469
d4360d6f 5470 be_intr_set(adapter, false);
eb7dd46c 5471 be_cancel_err_detection(adapter);
f67ef7ba 5472
87ac1a52 5473 be_cleanup(adapter);
6b7c5b94
SP
5474
5475 pci_save_state(pdev);
5476 pci_disable_device(pdev);
5477 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5478 return 0;
5479}
5480
484d76fd 5481static int be_pci_resume(struct pci_dev *pdev)
6b7c5b94 5482{
6b7c5b94 5483 struct be_adapter *adapter = pci_get_drvdata(pdev);
484d76fd 5484 int status = 0;
6b7c5b94
SP
5485
5486 status = pci_enable_device(pdev);
5487 if (status)
5488 return status;
5489
6b7c5b94
SP
5490 pci_restore_state(pdev);
5491
484d76fd 5492 status = be_resume(adapter);
2243e2e9
SP
5493 if (status)
5494 return status;
5495
972f37b4 5496 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
eb7dd46c 5497
6b7c5b94
SP
5498 return 0;
5499}
5500
82456b03
SP
5501/*
5502 * An FLR will stop BE from DMAing any data.
5503 */
5504static void be_shutdown(struct pci_dev *pdev)
5505{
5506 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5507
2d5d4154
AK
5508 if (!adapter)
5509 return;
82456b03 5510
d114f99a 5511 be_roce_dev_shutdown(adapter);
0f4a6828 5512 cancel_delayed_work_sync(&adapter->work);
eb7dd46c 5513 be_cancel_err_detection(adapter);
a4ca055f 5514
2d5d4154 5515 netif_device_detach(adapter->netdev);
82456b03 5516
57841869
AK
5517 be_cmd_reset_function(adapter);
5518
82456b03 5519 pci_disable_device(pdev);
82456b03
SP
5520}
5521
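/* EEH (PCI error recovery) callbacks: on error detection the device is
 * quiesced, the slot is then reset, and finally the adapter is resumed
 * and re-attached.
 */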
cf588477 5522static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5523 pci_channel_state_t state)
cf588477
SP
5524{
5525 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5526
5527 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5528
68f22793
PR
5529 be_roce_dev_remove(adapter);
5530
954f6825
VD
5531 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5532 be_set_error(adapter, BE_ERROR_EEH);
cf588477 5533
eb7dd46c 5534 be_cancel_err_detection(adapter);
cf588477 5535
87ac1a52 5536 be_cleanup(adapter);
cf588477 5537 }
cf588477
SP
5538
5539 if (state == pci_channel_io_perm_failure)
5540 return PCI_ERS_RESULT_DISCONNECT;
5541
5542 pci_disable_device(pdev);
5543
eeb7fc7b
SK
5544 /* The error could cause the FW to trigger a flash debug dump.
5545 * Resetting the card while flash dump is in progress
c8a54163
PR
5546 * can cause it not to recover; wait for it to finish.
 5547 * Wait only on the first function, as the wait is needed only
 5548 * once per adapter.
eeb7fc7b 5549 */
c8a54163
PR
5550 if (pdev->devfn == 0)
5551 ssleep(30);
5552
cf588477
SP
5553 return PCI_ERS_RESULT_NEED_RESET;
5554}
5555
5556static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5557{
5558 struct be_adapter *adapter = pci_get_drvdata(pdev);
5559 int status;
5560
5561 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5562
5563 status = pci_enable_device(pdev);
5564 if (status)
5565 return PCI_ERS_RESULT_DISCONNECT;
5566
5567 pci_set_master(pdev);
cf588477
SP
5568 pci_restore_state(pdev);
5569
5570 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5571 dev_info(&adapter->pdev->dev,
5572 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5573 status = be_fw_wait_ready(adapter);
cf588477
SP
5574 if (status)
5575 return PCI_ERS_RESULT_DISCONNECT;
5576
d6b6d987 5577 pci_cleanup_aer_uncorrect_error_status(pdev);
954f6825 5578 be_clear_error(adapter, BE_CLEAR_ALL);
cf588477
SP
5579 return PCI_ERS_RESULT_RECOVERED;
5580}
5581
5582static void be_eeh_resume(struct pci_dev *pdev)
5583{
5584 int status = 0;
5585 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5586
5587 dev_info(&adapter->pdev->dev, "EEH resume\n");
5588
5589 pci_save_state(pdev);
5590
484d76fd 5591 status = be_resume(adapter);
bf99e50d
PR
5592 if (status)
5593 goto err;
5594
68f22793
PR
5595 be_roce_dev_add(adapter);
5596
972f37b4 5597 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
cf588477
SP
5598 return;
5599err:
5600 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5601}
5602
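/* Invoked through the standard PCI sysfs interface, for example
 * (with <bdf> standing in for the device's domain:bus:dev.fn address):
 *   echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs   (enable 4 VFs)
 *   echo 0 > /sys/bus/pci/devices/<bdf>/sriov_numvfs   (disable VFs)
 */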
ace40aff
VV
5603static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5604{
5605 struct be_adapter *adapter = pci_get_drvdata(pdev);
b9263cbf 5606 struct be_resources vft_res = {0};
ace40aff
VV
5607 int status;
5608
5609 if (!num_vfs)
5610 be_vf_clear(adapter);
5611
5612 adapter->num_vfs = num_vfs;
5613
5614 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5615 dev_warn(&pdev->dev,
5616 "Cannot disable VFs while they are assigned\n");
5617 return -EBUSY;
5618 }
5619
 5620 /* When the HW is in an SR-IOV capable configuration, the PF-pool
 5621 * resources are distributed equally across the maximum number of VFs.
 5622 * The user may request that only a subset of the max VFs be enabled.
 5623 * Based on num_vfs, redistribute the resources across num_vfs so that
 5624 * each VF has access to a larger share of resources.
 5625 * This facility is not available in BE3 FW.
 5626 * Also, this is done by the FW on the Lancer chip.
 5627 */
5628 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
b9263cbf
SR
5629 be_calculate_vf_res(adapter, adapter->num_vfs,
5630 &vft_res);
ace40aff 5631 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
b9263cbf 5632 adapter->num_vfs, &vft_res);
ace40aff
VV
5633 if (status)
5634 dev_err(&pdev->dev,
5635 "Failed to optimize SR-IOV resources\n");
5636 }
5637
5638 status = be_get_resources(adapter);
5639 if (status)
5640 return be_cmd_status(status);
5641
5642 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5643 rtnl_lock();
5644 status = be_update_queues(adapter);
5645 rtnl_unlock();
5646 if (status)
5647 return be_cmd_status(status);
5648
5649 if (adapter->num_vfs)
5650 status = be_vf_setup(adapter);
5651
5652 if (!status)
5653 return adapter->num_vfs;
5654
5655 return 0;
5656}
5657
3646f0e5 5658static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5659 .error_detected = be_eeh_err_detected,
5660 .slot_reset = be_eeh_reset,
5661 .resume = be_eeh_resume,
5662};
5663
6b7c5b94
SP
5664static struct pci_driver be_driver = {
5665 .name = DRV_NAME,
5666 .id_table = be_dev_ids,
5667 .probe = be_probe,
5668 .remove = be_remove,
5669 .suspend = be_suspend,
484d76fd 5670 .resume = be_pci_resume,
82456b03 5671 .shutdown = be_shutdown,
ace40aff 5672 .sriov_configure = be_pci_sriov_configure,
cf588477 5673 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5674};
5675
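/* Module-load sanity checks. rx_frag_size can be set at load time,
 * for example:
 *   modprobe be2net rx_frag_size=4096
 * Invalid values fall back to the 2048-byte default.
 */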
5676static int __init be_init_module(void)
5677{
8e95a202
JP
5678 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5679 rx_frag_size != 2048) {
6b7c5b94
SP
 5680 pr_warn(DRV_NAME
 5681 " : Module param rx_frag_size must be 2048/4096/8192."
 5682 " Using 2048\n");
5683 rx_frag_size = 2048;
5684 }
6b7c5b94 5685
ace40aff
VV
5686 if (num_vfs > 0) {
5687 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5688 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5689 }
5690
6b7c5b94
SP
5691 return pci_register_driver(&be_driver);
5692}
5693module_init(be_init_module);
5694
5695static void __exit be_exit_module(void)
5696{
5697 pci_unregister_driver(&be_driver);
5698}
5699module_exit(be_exit_module);