be2net: move un-exported routines from be.h to respective src files
[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

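/* Usage sketch (illustrative, not part of this file): both parameters
 * are read-only at runtime (S_IRUGO), so they are given at load time,
 * e.g.:
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 *
 * num_vfs requests that many PCI VFs at probe time; rx_frag_size sets
 * the size of the buffers that received frames are scattered into.
 */
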
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

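/* Usage sketch for the helpers above (illustrative sizes; the real
 * ring lengths and entry sizes come from be.h): carving a 1024-entry
 * queue with 4-byte entries out of coherent DMA memory and releasing
 * it again:
 *
 *	struct be_queue_info eq;
 *
 *	if (be_queue_alloc(adapter, &eq, 1024, 4))
 *		return -ENOMEM;
 *	...
 *	be_queue_free(adapter, &eq);
 */
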
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

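/* Worked example of the doorbell packing above (the shift/mask values
 * live in be.h): rearming EQ 5 after popping 3 events builds a single
 * 32-bit value with the ring-id bits set to 5, the rearm and event
 * bits set, and the num-popped field set to 3, then posts it with one
 * iowrite32() to the doorbell BAR.
 */
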
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

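/* Worked example of the wrap handling above: if *acc is 0x0001fff0
 * (high word 1, low word 0xfff0) and the 16-bit HW counter now reads
 * val = 0x0010, then val < lo(*acc) flags a wrap; newacc becomes
 * hi(*acc) + val + 65536 = 0x00010010 + 0x10000 = 0x00020010, so the
 * accumulated 32-bit value keeps growing monotonically across wraps.
 */
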
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
			   drvs->rx_alignment_symbol_errors +
			   drvs->rx_in_range_errors +
			   drvs->rx_out_range_errors +
			   drvs->rx_frame_too_long +
			   drvs->rx_dropped_too_small +
			   drvs->rx_dropped_too_short +
			   drvs->rx_dropped_header_too_small +
			   drvs->rx_dropped_tcp_length +
			   drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
				  drvs->rx_out_range_errors +
				  drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

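/* Worked example: an skb with linear (header) data and two page frags
 * needs 1 header WRB + 1 WRB for the linear part + 2 frag WRBs = 4
 * WRBs; a fully non-linear skb (skb_headlen() == 0) with two frags
 * needs only 3.
 */
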
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

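/* Worked example of the address split above: a 64-bit DMA address of
 * 0x0000000123456780 is written out as frag_pa_hi = 0x00000001 and
 * frag_pa_lo = 0x23456780; unmap_tx_frag() later reassembles it as
 * (u64)frag_pa_hi << 32 | frag_pa_lo.
 */
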
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

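/* VLAN TCI refresher for the remapping above: bits 15:13 of the tag
 * are the priority (PCP), bits 11:0 the VID. E.g. a TCI of 0x6064
 * carries priority 3 and VID 0x064; if bit 3 of vlan_prio_bmap is
 * clear, the priority bits are replaced with the FW-recommended
 * priority while VID 0x064 is preserved.
 */
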
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

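/* QinQ example for the function above: with adapter->pvid = 100,
 * adapter->qnq_vid = 200 and an untagged skb, the inner tag 100 and
 * the outer tag 200 are both inserted in software, and *skip_hw_vlan
 * is set so the hardware does not add a third tag on top.
 */
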
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		      VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

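/* Worked example of the odd-WRB padding above: if the last request
 * used 3 WRBs and pend_wrb_cnt is 3 (odd) on a non-Lancer chip, one
 * zeroed dummy WRB is queued, pend_wrb_cnt becomes 4, and the header's
 * num_wrb field is rewritten from 3 to 4 before the doorbell rings.
 */
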
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else {
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

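/* Worked example of the Skyhawk check above: on a 10Gbps link,
 * link_speed = 10000 and percent_rate = 100, so max_tx_rate must be a
 * multiple of 100 Mbps in the range 100..10000; a request for 2550
 * Mbps is rejected with -EINVAL, while 2500 is accepted.
 */
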
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

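/* Worked example of the adaptive delay above (illustrative numbers):
 * 150,000 rx+tx packets in a 100 ms window gives pps = 1,500,000, so
 * eqd = (1500000 / 15000) << 2 = 400; the value is then clamped to
 * [aic->min_eqd, aic->max_eqd] and converted into the 65/100 delay
 * multiplier that the FW command expects.
 */
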
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

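/* Example of the predicate above: a clean TCP/IPv4 completion (tcpf,
 * l4_csum and ip_csum set, err clear) passes, so the caller can mark
 * the skb's checksum as already verified; an ICMP packet never passes
 * since tcpf and udpf are both 0, leaving checksum validation to the
 * stack.
 */
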
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

1600/*
1601 * skb_fill_rx_data forms a complete skb for an ether frame
1602 * indicated by rxcp.
1603 */
10ef9ab4
SP
1604static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1605 struct be_rx_compl_info *rxcp)
6b7c5b94 1606{
6b7c5b94 1607 struct be_rx_page_info *page_info;
2e588f84
SP
1608 u16 i, j;
1609 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1610 u8 *start;
6b7c5b94 1611
0b0ef1d0 1612 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1613 start = page_address(page_info->page) + page_info->page_offset;
1614 prefetch(start);
1615
1616 /* Copy data in the first descriptor of this completion */
2e588f84 1617 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1618
6b7c5b94
SP
1619 skb->len = curr_frag_len;
1620 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1621 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1622 /* Complete packet has now been moved to data */
1623 put_page(page_info->page);
1624 skb->data_len = 0;
1625 skb->tail += curr_frag_len;
1626 } else {
ac1ae5f3
ED
1627 hdr_len = ETH_HLEN;
1628 memcpy(skb->data, start, hdr_len);
6b7c5b94 1629 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1630 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1631 skb_shinfo(skb)->frags[0].page_offset =
1632 page_info->page_offset + hdr_len;
748b539a
SP
1633 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1634 curr_frag_len - hdr_len);
6b7c5b94 1635 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1636 skb->truesize += rx_frag_size;
6b7c5b94
SP
1637 skb->tail += hdr_len;
1638 }
205859a2 1639 page_info->page = NULL;
6b7c5b94 1640
2e588f84
SP
1641 if (rxcp->pkt_size <= rx_frag_size) {
1642 BUG_ON(rxcp->num_rcvd != 1);
1643 return;
6b7c5b94
SP
1644 }
1645
1646 /* More frags present for this completion */
2e588f84
SP
1647 remaining = rxcp->pkt_size - curr_frag_len;
1648 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1649 page_info = get_rx_page_info(rxo);
2e588f84 1650 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1651
bd46cb6c
AK
1652 /* Coalesce all frags from the same physical page in one slot */
1653 if (page_info->page_offset == 0) {
1654 /* Fresh page */
1655 j++;
b061b39e 1656 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1657 skb_shinfo(skb)->frags[j].page_offset =
1658 page_info->page_offset;
9e903e08 1659 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1660 skb_shinfo(skb)->nr_frags++;
1661 } else {
1662 put_page(page_info->page);
1663 }
1664
9e903e08 1665 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1666 skb->len += curr_frag_len;
1667 skb->data_len += curr_frag_len;
bdb28a97 1668 skb->truesize += rx_frag_size;
2e588f84 1669 remaining -= curr_frag_len;
205859a2 1670 page_info->page = NULL;
6b7c5b94 1671 }
bd46cb6c 1672 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1673}
1674
5be93b9a 1675/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1676static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1677 struct be_rx_compl_info *rxcp)
6b7c5b94 1678{
10ef9ab4 1679 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1680 struct net_device *netdev = adapter->netdev;
6b7c5b94 1681 struct sk_buff *skb;
89420424 1682
bb349bb4 1683 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1684 if (unlikely(!skb)) {
ac124ff9 1685 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1686 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1687 return;
1688 }
1689
10ef9ab4 1690 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1691
6332c8d3 1692 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1694 else
1695 skb_checksum_none_assert(skb);
6b7c5b94 1696
6332c8d3 1697 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1698 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1699 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1700 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1701
b6c0e89d 1702 skb->csum_level = rxcp->tunneled;
6384a4d0 1703 skb_mark_napi_id(skb, napi);
6b7c5b94 1704
343e43c0 1705 if (rxcp->vlanf)
86a9bad3 1706 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1707
1708 netif_receive_skb(skb);
6b7c5b94
SP
1709}
1710
5be93b9a 1711/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1712static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1713 struct napi_struct *napi,
1714 struct be_rx_compl_info *rxcp)
6b7c5b94 1715{
10ef9ab4 1716 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1717 struct be_rx_page_info *page_info;
5be93b9a 1718 struct sk_buff *skb = NULL;
2e588f84
SP
1719 u16 remaining, curr_frag_len;
1720 u16 i, j;
3968fa1e 1721
10ef9ab4 1722 skb = napi_get_frags(napi);
5be93b9a 1723 if (!skb) {
10ef9ab4 1724 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1725 return;
1726 }
1727
2e588f84
SP
1728 remaining = rxcp->pkt_size;
1729 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1730 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1731
1732 curr_frag_len = min(remaining, rx_frag_size);
1733
bd46cb6c
AK
1734 /* Coalesce all frags from the same physical page in one slot */
1735 if (i == 0 || page_info->page_offset == 0) {
1736 /* First frag or Fresh page */
1737 j++;
b061b39e 1738 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1739 skb_shinfo(skb)->frags[j].page_offset =
1740 page_info->page_offset;
9e903e08 1741 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1742 } else {
1743 put_page(page_info->page);
1744 }
9e903e08 1745 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1746 skb->truesize += rx_frag_size;
bd46cb6c 1747 remaining -= curr_frag_len;
6b7c5b94
SP
1748 memset(page_info, 0, sizeof(*page_info));
1749 }
bd46cb6c 1750 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1751
5be93b9a 1752 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1753 skb->len = rxcp->pkt_size;
1754 skb->data_len = rxcp->pkt_size;
5be93b9a 1755 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1756 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1757 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1758 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1759
b6c0e89d 1760 skb->csum_level = rxcp->tunneled;
6384a4d0 1761 skb_mark_napi_id(skb, napi);
5be93b9a 1762
343e43c0 1763 if (rxcp->vlanf)
86a9bad3 1764 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1765
10ef9ab4 1766 napi_gro_frags(napi);
2e588f84
SP
1767}
1768
10ef9ab4
SP
1769static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1770 struct be_rx_compl_info *rxcp)
2e588f84 1771{
c3c18bc1
SP
1772 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1773 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1774 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1775 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1776 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1777 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1778 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1779 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1780 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1781 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1782 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1783 if (rxcp->vlanf) {
c3c18bc1
SP
1784 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1785 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1786 }
c3c18bc1 1787 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1788 rxcp->tunneled =
c3c18bc1 1789 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1790}
1791
10ef9ab4
SP
1792static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1793 struct be_rx_compl_info *rxcp)
2e588f84 1794{
c3c18bc1
SP
1795 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1796 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1797 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1798 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1799 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1800 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1801 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1802 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1803 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1804 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1805 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1806 if (rxcp->vlanf) {
c3c18bc1
SP
1807 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1808 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1809 }
c3c18bc1
SP
1810 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1811 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1812}
1813
1814static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1815{
1816 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1817 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1818 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1819
2e588f84
SP
1820 /* For checking the valid bit it is OK to use either definition as the
1821 * valid bit is at the same position in both v0 and v1 Rx compls */
1822 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1823 return NULL;
6b7c5b94 1824
2e588f84
SP
1825 rmb();
1826 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1827
2e588f84 1828 if (adapter->be3_native)
10ef9ab4 1829 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1830 else
10ef9ab4 1831 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1832
e38b1706
SK
1833 if (rxcp->ip_frag)
1834 rxcp->l4_csum = 0;
1835
15d72184 1836 if (rxcp->vlanf) {
f93f160b
VV
1837 /* In QNQ modes, if qnq bit is not set, then the packet was
1838 * tagged only with the transparent outer vlan-tag and must
1839 * not be treated as a vlan packet by the host
1840 */
1841 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1842 rxcp->vlanf = 0;
6b7c5b94 1843
15d72184 1844 if (!lancer_chip(adapter))
3c709f8f 1845 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1846
939cf306 1847 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1848 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1849 rxcp->vlanf = 0;
1850 }
2e588f84
SP
1851
1852 /* As the compl has been parsed, reset it; we won't touch it again */
1853 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1854
3abcdeda 1855 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1856 return rxcp;
1857}
1858
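/* Illustrative sketch (not part of the driver): every completion ring in
 * this file (RX, TX, MCC) is consumed the way be_rx_compl_get() does it:
 * peek at the tail entry's valid bit, read the payload only after rmb(),
 * clear the valid bit so the slot scans as empty on the next pass, then
 * advance the tail. Generic form with a hypothetical entry layout:
 */
struct be_example_compl { u32 valid; u32 payload; };

static struct be_example_compl *be_example_compl_get(struct be_queue_info *cq)
{
	struct be_example_compl *compl = queue_tail_node(cq);

	if (compl->valid == 0)
		return NULL;	/* HW has not produced this entry yet */
	rmb();			/* don't read payload before the valid bit */
	compl->valid = 0;	/* reset so this slot reads as empty later */
	queue_tail_inc(cq);
	return compl;
}
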
1829b086 1859static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1860{
6b7c5b94 1861 u32 order = get_order(size);
1829b086 1862
6b7c5b94 1863 if (order > 0)
1829b086
ED
1864 gfp |= __GFP_COMP;
1865 return alloc_pages(gfp, order);
6b7c5b94
SP
1866}
1867
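/* Illustrative sketch (not part of the driver): why __GFP_COMP matters
 * above. For order > 0 the driver hands out rx_frag_size fragments of one
 * big page and put_page()s each fragment; allocating a compound page makes
 * those per-fragment references land on the head page. A hedged usage
 * sketch with a hypothetical helper name:
 */
static struct page *be_example_big_page(unsigned int bytes)
{
	unsigned int order = get_order(bytes);	/* smallest order >= bytes */
	gfp_t gfp = GFP_ATOMIC;

	if (order > 0)
		gfp |= __GFP_COMP;	/* sub-page put_page() needs a compound head */
	return alloc_pages(gfp, order);
}
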
1868/*
1869 * Allocate a page, split it into fragments of size rx_frag_size and post them as
1870 * receive buffers to BE
1871 */
c30d7266 1872static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1873{
3abcdeda 1874 struct be_adapter *adapter = rxo->adapter;
26d92f92 1875 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1876 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1877 struct page *pagep = NULL;
ba42fad0 1878 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1879 struct be_eth_rx_d *rxd;
1880 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1881 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1882
3abcdeda 1883 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1884 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1885 if (!pagep) {
1829b086 1886 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1887 if (unlikely(!pagep)) {
ac124ff9 1888 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1889 break;
1890 }
ba42fad0
IV
1891 page_dmaaddr = dma_map_page(dev, pagep, 0,
1892 adapter->big_page_size,
2b7bcebf 1893 DMA_FROM_DEVICE);
ba42fad0
IV
1894 if (dma_mapping_error(dev, page_dmaaddr)) {
1895 put_page(pagep);
1896 pagep = NULL;
d3de1540 1897 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1898 break;
1899 }
e50287be 1900 page_offset = 0;
6b7c5b94
SP
1901 } else {
1902 get_page(pagep);
e50287be 1903 page_offset += rx_frag_size;
6b7c5b94 1904 }
e50287be 1905 page_info->page_offset = page_offset;
6b7c5b94 1906 page_info->page = pagep;
6b7c5b94
SP
1907
1908 rxd = queue_head_node(rxq);
e50287be 1909 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1910 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1911 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1912
1913 /* Any space left in the current big page for another frag? */
1914 if ((page_offset + rx_frag_size + rx_frag_size) >
1915 adapter->big_page_size) {
1916 pagep = NULL;
e50287be
SP
1917 page_info->last_frag = true;
1918 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1919 } else {
1920 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1921 }
26d92f92
SP
1922
1923 prev_page_info = page_info;
1924 queue_head_inc(rxq);
10ef9ab4 1925 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1926 }
e50287be
SP
1927
1928 /* Mark the last frag of a page when we break out of the above loop
1929 * with no more slots available in the RXQ
1930 */
1931 if (pagep) {
1932 prev_page_info->last_frag = true;
1933 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1934 }
6b7c5b94
SP
1935
1936 if (posted) {
6b7c5b94 1937 atomic_add(posted, &rxq->used);
6384a4d0
SP
1938 if (rxo->rx_post_starved)
1939 rxo->rx_post_starved = false;
c30d7266
AK
1940 do {
1941 notify = min(256u, posted);
1942 be_rxq_notify(adapter, rxq->id, notify);
1943 posted -= notify;
1944 } while (posted);
ea1dae11
SP
1945 } else if (atomic_read(&rxq->used) == 0) {
1946 /* Let be_worker replenish when memory is available */
3abcdeda 1947 rxo->rx_post_starved = true;
6b7c5b94 1948 }
6b7c5b94
SP
1949}
1950
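/* Illustrative sketch (not part of the driver): the doorbell loop above
 * rings the RXQ in chunks because a single notify carries at most 256
 * newly posted frags in this driver. The chunking in isolation, with a
 * hypothetical helper name:
 */
static void be_example_rxq_notify(struct be_adapter *adapter, u16 qid,
				  u32 posted)
{
	while (posted) {
		u32 notify = min(256u, posted);	/* per-doorbell limit */

		be_rxq_notify(adapter, qid, notify);
		posted -= notify;
	}
}
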
5fb379ee 1951static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1952{
6b7c5b94
SP
1953 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1954
1955 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1956 return NULL;
1957
f3eb62d2 1958 rmb();
6b7c5b94
SP
1959 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1960
1961 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1962
1963 queue_tail_inc(tx_cq);
1964 return txcp;
1965}
1966
3c8def97 1967static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1968 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1969{
5f07b3c5 1970 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 1971 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
1972 u16 frag_index, num_wrbs = 0;
1973 struct sk_buff *skb = NULL;
1974 bool unmap_skb_hdr = false;
a73b796e 1975 struct be_eth_wrb *wrb;
6b7c5b94 1976
ec43b1a6 1977 do {
5f07b3c5
SP
1978 if (sent_skbs[txq->tail]) {
1979 /* Free skb from prev req */
1980 if (skb)
1981 dev_consume_skb_any(skb);
1982 skb = sent_skbs[txq->tail];
1983 sent_skbs[txq->tail] = NULL;
1984 queue_tail_inc(txq); /* skip hdr wrb */
1985 num_wrbs++;
1986 unmap_skb_hdr = true;
1987 }
a73b796e 1988 wrb = queue_tail_node(txq);
5f07b3c5 1989 frag_index = txq->tail;
2b7bcebf 1990 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 1991 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 1992 unmap_skb_hdr = false;
6b7c5b94 1993 queue_tail_inc(txq);
5f07b3c5
SP
1994 num_wrbs++;
1995 } while (frag_index != last_index);
1996 dev_consume_skb_any(skb);
6b7c5b94 1997
4d586b82 1998 return num_wrbs;
6b7c5b94
SP
1999}
2000
10ef9ab4
SP
2001/* Return the number of events in the event queue */
2002static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2003{
10ef9ab4
SP
2004 struct be_eq_entry *eqe;
2005 int num = 0;
859b1e4e 2006
10ef9ab4
SP
2007 do {
2008 eqe = queue_tail_node(&eqo->q);
2009 if (eqe->evt == 0)
2010 break;
859b1e4e 2011
10ef9ab4
SP
2012 rmb();
2013 eqe->evt = 0;
2014 num++;
2015 queue_tail_inc(&eqo->q);
2016 } while (true);
2017
2018 return num;
859b1e4e
SP
2019}
2020
10ef9ab4
SP
2021/* Leaves the EQ is disarmed state */
2022static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2023{
10ef9ab4 2024 int num = events_get(eqo);
859b1e4e 2025
10ef9ab4 2026 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2027}
2028
10ef9ab4 2029static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2030{
2031 struct be_rx_page_info *page_info;
3abcdeda
SP
2032 struct be_queue_info *rxq = &rxo->q;
2033 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2034 struct be_rx_compl_info *rxcp;
d23e946c
SP
2035 struct be_adapter *adapter = rxo->adapter;
2036 int flush_wait = 0;
6b7c5b94 2037
d23e946c
SP
2038 /* Consume pending rx completions.
2039 * Wait for the flush completion (identified by zero num_rcvd)
2040 * to arrive. Notify CQ even when there are no more CQ entries
2041 * for HW to flush partially coalesced CQ entries.
2042 * In Lancer, there is no need to wait for flush compl.
2043 */
2044 for (;;) {
2045 rxcp = be_rx_compl_get(rxo);
ddf1169f 2046 if (!rxcp) {
d23e946c
SP
2047 if (lancer_chip(adapter))
2048 break;
2049
2050 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2051 dev_warn(&adapter->pdev->dev,
2052 "did not receive flush compl\n");
2053 break;
2054 }
2055 be_cq_notify(adapter, rx_cq->id, true, 0);
2056 mdelay(1);
2057 } else {
2058 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2059 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2060 if (rxcp->num_rcvd == 0)
2061 break;
2062 }
6b7c5b94
SP
2063 }
2064
d23e946c
SP
2065 /* After cleanup, leave the CQ in unarmed state */
2066 be_cq_notify(adapter, rx_cq->id, false, 0);
2067
2068 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2069 while (atomic_read(&rxq->used) > 0) {
2070 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2071 put_page(page_info->page);
2072 memset(page_info, 0, sizeof(*page_info));
2073 }
2074 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2075 rxq->tail = 0;
2076 rxq->head = 0;
6b7c5b94
SP
2077}
2078
0ae57bb3 2079static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2080{
5f07b3c5
SP
2081 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2082 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2083 struct be_tx_obj *txo;
2084 struct be_queue_info *txq;
a8e9179a 2085 struct be_eth_tx_compl *txcp;
0ae57bb3 2086 int i, pending_txqs;
a8e9179a 2087
1a3d0717 2088 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2089 do {
0ae57bb3
SP
2090 pending_txqs = adapter->num_tx_qs;
2091
2092 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2093 cmpl = 0;
2094 num_wrbs = 0;
0ae57bb3
SP
2095 txq = &txo->q;
2096 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2097 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2098 num_wrbs += be_tx_compl_process(adapter, txo,
2099 end_idx);
2100 cmpl++;
2101 }
2102 if (cmpl) {
2103 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2104 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2105 timeo = 0;
0ae57bb3 2106 }
5f07b3c5 2107 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2108 pending_txqs--;
a8e9179a
SP
2109 }
2110
1a3d0717 2111 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2112 break;
2113
2114 mdelay(1);
2115 } while (true);
2116
5f07b3c5 2117 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2118 for_all_tx_queues(adapter, txo, i) {
2119 txq = &txo->q;
0ae57bb3 2120
5f07b3c5
SP
2121 if (atomic_read(&txq->used)) {
2122 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2123 i, atomic_read(&txq->used));
2124 notified_idx = txq->tail;
0ae57bb3 2125 end_idx = txq->tail;
5f07b3c5
SP
2126 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2127 txq->len);
2128 /* Use the tx-compl process logic to handle requests
2129 * that were not sent to the HW.
2130 */
0ae57bb3
SP
2131 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2132 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2133 BUG_ON(atomic_read(&txq->used));
2134 txo->pend_wrb_cnt = 0;
2135 /* Since hw was never notified of these requests,
2136 * reset TXQ indices
2137 */
2138 txq->head = notified_idx;
2139 txq->tail = notified_idx;
0ae57bb3 2140 }
b03388d6 2141 }
6b7c5b94
SP
2142}
2143
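/* Illustrative note (not part of the driver): the cleanup above locates
 * the last enqueued-but-unprocessed wrb with index_adv(), which is plain
 * modular arithmetic over the ring. Assuming index_adv() behaves as in
 * be.h, the end index works out to
 *
 *	end_idx = (txq->tail + atomic_read(&txq->used) - 1) % txq->len;
 *
 * i.e. the slot of the last request that was queued but never completed,
 * so be_tx_compl_process() can walk tail..end_idx and free every skb.
 */
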
10ef9ab4
SP
2144static void be_evt_queues_destroy(struct be_adapter *adapter)
2145{
2146 struct be_eq_obj *eqo;
2147 int i;
2148
2149 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2150 if (eqo->q.created) {
2151 be_eq_clean(eqo);
10ef9ab4 2152 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2153 napi_hash_del(&eqo->napi);
68d7bdcb 2154 netif_napi_del(&eqo->napi);
19d59aa7 2155 }
10ef9ab4
SP
2156 be_queue_free(adapter, &eqo->q);
2157 }
2158}
2159
2160static int be_evt_queues_create(struct be_adapter *adapter)
2161{
2162 struct be_queue_info *eq;
2163 struct be_eq_obj *eqo;
2632bafd 2164 struct be_aic_obj *aic;
10ef9ab4
SP
2165 int i, rc;
2166
92bf14ab
SP
2167 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2168 adapter->cfg_num_qs);
10ef9ab4
SP
2169
2170 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2171 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2172 BE_NAPI_WEIGHT);
6384a4d0 2173 napi_hash_add(&eqo->napi);
2632bafd 2174 aic = &adapter->aic_obj[i];
10ef9ab4 2175 eqo->adapter = adapter;
10ef9ab4 2176 eqo->idx = i;
2632bafd
SP
2177 aic->max_eqd = BE_MAX_EQD;
2178 aic->enable = true;
10ef9ab4
SP
2179
2180 eq = &eqo->q;
2181 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2182 sizeof(struct be_eq_entry));
10ef9ab4
SP
2183 if (rc)
2184 return rc;
2185
f2f781a7 2186 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2187 if (rc)
2188 return rc;
2189 }
1cfafab9 2190 return 0;
10ef9ab4
SP
2191}
2192
5fb379ee
SP
2193static void be_mcc_queues_destroy(struct be_adapter *adapter)
2194{
2195 struct be_queue_info *q;
5fb379ee 2196
8788fdc2 2197 q = &adapter->mcc_obj.q;
5fb379ee 2198 if (q->created)
8788fdc2 2199 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2200 be_queue_free(adapter, q);
2201
8788fdc2 2202 q = &adapter->mcc_obj.cq;
5fb379ee 2203 if (q->created)
8788fdc2 2204 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2205 be_queue_free(adapter, q);
2206}
2207
2208/* Must be called only after TX qs are created as MCC shares TX EQ */
2209static int be_mcc_queues_create(struct be_adapter *adapter)
2210{
2211 struct be_queue_info *q, *cq;
5fb379ee 2212
8788fdc2 2213 cq = &adapter->mcc_obj.cq;
5fb379ee 2214 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2215 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2216 goto err;
2217
10ef9ab4
SP
2218 /* Use the default EQ for MCC completions */
2219 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2220 goto mcc_cq_free;
2221
8788fdc2 2222 q = &adapter->mcc_obj.q;
5fb379ee
SP
2223 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2224 goto mcc_cq_destroy;
2225
8788fdc2 2226 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2227 goto mcc_q_free;
2228
2229 return 0;
2230
2231mcc_q_free:
2232 be_queue_free(adapter, q);
2233mcc_cq_destroy:
8788fdc2 2234 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2235mcc_cq_free:
2236 be_queue_free(adapter, cq);
2237err:
2238 return -1;
2239}
2240
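/* Illustrative note (not part of the driver): be_mcc_queues_create() uses
 * the classic kernel goto-unwind shape; each label undoes exactly the
 * steps that succeeded before the failure, in reverse order:
 *
 *	if (alloc_cq())		goto err;	   (nothing to undo)
 *	if (create_cq())	goto free_cq;	   (undo the cq alloc)
 *	if (alloc_q())		goto destroy_cq;   (undo create, then alloc)
 *	...
 *
 * The step names here are hypothetical; the real labels above are
 * mcc_q_free, mcc_cq_destroy and mcc_cq_free.
 */
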
6b7c5b94
SP
2241static void be_tx_queues_destroy(struct be_adapter *adapter)
2242{
2243 struct be_queue_info *q;
3c8def97
SP
2244 struct be_tx_obj *txo;
2245 u8 i;
6b7c5b94 2246
3c8def97
SP
2247 for_all_tx_queues(adapter, txo, i) {
2248 q = &txo->q;
2249 if (q->created)
2250 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2251 be_queue_free(adapter, q);
6b7c5b94 2252
3c8def97
SP
2253 q = &txo->cq;
2254 if (q->created)
2255 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2256 be_queue_free(adapter, q);
2257 }
6b7c5b94
SP
2258}
2259
7707133c 2260static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2261{
10ef9ab4 2262 struct be_queue_info *cq, *eq;
3c8def97 2263 struct be_tx_obj *txo;
92bf14ab 2264 int status, i;
6b7c5b94 2265
92bf14ab 2266 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2267
10ef9ab4
SP
2268 for_all_tx_queues(adapter, txo, i) {
2269 cq = &txo->cq;
2270 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2271 sizeof(struct be_eth_tx_compl));
2272 if (status)
2273 return status;
3c8def97 2274
827da44c
JS
2275 u64_stats_init(&txo->stats.sync);
2276 u64_stats_init(&txo->stats.sync_compl);
2277
10ef9ab4
SP
2278 /* If num_evt_qs is less than num_tx_qs, then more than
2279 * one txq shares an eq
2280 */
2281 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2282 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2283 if (status)
2284 return status;
6b7c5b94 2285
10ef9ab4
SP
2286 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2287 sizeof(struct be_eth_wrb));
2288 if (status)
2289 return status;
6b7c5b94 2290
94d73aaa 2291 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2292 if (status)
2293 return status;
3c8def97 2294 }
6b7c5b94 2295
d379142b
SP
2296 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2297 adapter->num_tx_qs);
10ef9ab4 2298 return 0;
6b7c5b94
SP
2299}
2300
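/* Illustrative note (not part of the driver): the "i % num_evt_qs" above
 * spreads TX CQs round-robin over the EQs. With, say, 8 TXQs on 4 EQs:
 * txq0/txq4 -> eq0, txq1/txq5 -> eq1, txq2/txq6 -> eq2, txq3/txq7 -> eq3.
 * In general eq_idx(i) = i % num_evt_qs, so no EQ carries more than
 * ceil(num_tx_qs / num_evt_qs) TX CQs.
 */
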
10ef9ab4 2301static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2302{
2303 struct be_queue_info *q;
3abcdeda
SP
2304 struct be_rx_obj *rxo;
2305 int i;
2306
2307 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2308 q = &rxo->cq;
2309 if (q->created)
2310 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2311 be_queue_free(adapter, q);
ac6a0c4a
SP
2312 }
2313}
2314
10ef9ab4 2315static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2316{
10ef9ab4 2317 struct be_queue_info *eq, *cq;
3abcdeda
SP
2318 struct be_rx_obj *rxo;
2319 int rc, i;
6b7c5b94 2320
92bf14ab
SP
2321 /* We can create as many RSS rings as there are EQs. */
2322 adapter->num_rx_qs = adapter->num_evt_qs;
2323
2324 /* We'll use RSS only if at least 2 RSS rings are supported.
2325 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2326 */
92bf14ab
SP
2327 if (adapter->num_rx_qs > 1)
2328 adapter->num_rx_qs++;
2329
6b7c5b94 2330 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2331 for_all_rx_queues(adapter, rxo, i) {
2332 rxo->adapter = adapter;
3abcdeda
SP
2333 cq = &rxo->cq;
2334 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2335 sizeof(struct be_eth_rx_compl));
3abcdeda 2336 if (rc)
10ef9ab4 2337 return rc;
3abcdeda 2338
827da44c 2339 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2340 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2341 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2342 if (rc)
10ef9ab4 2343 return rc;
3abcdeda 2344 }
6b7c5b94 2345
d379142b
SP
2346 dev_info(&adapter->pdev->dev,
2347 "created %d RSS queue(s) and 1 default RX queue\n",
2348 adapter->num_rx_qs - 1);
10ef9ab4 2349 return 0;
b628bde2
SP
2350}
2351
6b7c5b94
SP
2352static irqreturn_t be_intx(int irq, void *dev)
2353{
e49cc34f
SP
2354 struct be_eq_obj *eqo = dev;
2355 struct be_adapter *adapter = eqo->adapter;
2356 int num_evts = 0;
6b7c5b94 2357
d0b9cec3
SP
2358 /* IRQ is not expected when NAPI is scheduled as the EQ
2359 * will not be armed.
2360 * But this can happen on Lancer INTx where it takes
2361 * a while to de-assert INTx, or in BE2 where occasionally
2362 * an interrupt may be raised even when the EQ is unarmed.
2363 * If NAPI is already scheduled, then counting & notifying
2364 * events will orphan them.
e49cc34f 2365 */
d0b9cec3 2366 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2367 num_evts = events_get(eqo);
d0b9cec3
SP
2368 __napi_schedule(&eqo->napi);
2369 if (num_evts)
2370 eqo->spurious_intr = 0;
2371 }
2372 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2373
d0b9cec3
SP
2374 /* Return IRQ_HANDLED only for the first spurious intr
2375 * after a valid intr to stop the kernel from branding
2376 * this irq as a bad one!
e49cc34f 2377 */
d0b9cec3
SP
2378 if (num_evts || eqo->spurious_intr++ == 0)
2379 return IRQ_HANDLED;
2380 else
2381 return IRQ_NONE;
6b7c5b94
SP
2382}
2383
10ef9ab4 2384static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2385{
10ef9ab4 2386 struct be_eq_obj *eqo = dev;
6b7c5b94 2387
0b545a62
SP
2388 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2389 napi_schedule(&eqo->napi);
6b7c5b94
SP
2390 return IRQ_HANDLED;
2391}
2392
2e588f84 2393static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2394{
e38b1706 2395 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2396}
2397
10ef9ab4 2398static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2399 int budget, int polling)
6b7c5b94 2400{
3abcdeda
SP
2401 struct be_adapter *adapter = rxo->adapter;
2402 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2403 struct be_rx_compl_info *rxcp;
6b7c5b94 2404 u32 work_done;
c30d7266 2405 u32 frags_consumed = 0;
6b7c5b94
SP
2406
2407 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2408 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2409 if (!rxcp)
2410 break;
2411
12004ae9
SP
2412 /* Is it a flush compl that has no data */
2413 if (unlikely(rxcp->num_rcvd == 0))
2414 goto loop_continue;
2415
2416 /* Discard compl with partial DMA Lancer B0 */
2417 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2418 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2419 goto loop_continue;
2420 }
2421
2422 /* On BE, drop pkts that arrive due to imperfect filtering in
2423 * promiscuous mode on some SKUs
2424 */
2425 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2426 !lancer_chip(adapter))) {
10ef9ab4 2427 be_rx_compl_discard(rxo, rxcp);
12004ae9 2428 goto loop_continue;
64642811 2429 }
009dd872 2430
6384a4d0
SP
2431 /* Don't do GRO when we're busy_polling */
2432 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2433 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2434 else
6384a4d0
SP
2435 be_rx_compl_process(rxo, napi, rxcp);
2436
12004ae9 2437loop_continue:
c30d7266 2438 frags_consumed += rxcp->num_rcvd;
2e588f84 2439 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2440 }
2441
10ef9ab4
SP
2442 if (work_done) {
2443 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2444
6384a4d0
SP
2445 /* When an rx-obj gets into post_starved state, just
2446 * let be_worker do the posting.
2447 */
2448 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2449 !rxo->rx_post_starved)
c30d7266
AK
2450 be_post_rx_frags(rxo, GFP_ATOMIC,
2451 max_t(u32, MAX_RX_POST,
2452 frags_consumed));
6b7c5b94 2453 }
10ef9ab4 2454
6b7c5b94
SP
2455 return work_done;
2456}
2457
512bb8a2
KA
2458static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2459{
2460 switch (status) {
2461 case BE_TX_COMP_HDR_PARSE_ERR:
2462 tx_stats(txo)->tx_hdr_parse_err++;
2463 break;
2464 case BE_TX_COMP_NDMA_ERR:
2465 tx_stats(txo)->tx_dma_err++;
2466 break;
2467 case BE_TX_COMP_ACL_ERR:
2468 tx_stats(txo)->tx_spoof_check_err++;
2469 break;
2470 }
2471}
2472
2473static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2474{
2475 switch (status) {
2476 case LANCER_TX_COMP_LSO_ERR:
2477 tx_stats(txo)->tx_tso_err++;
2478 break;
2479 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2480 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2481 tx_stats(txo)->tx_spoof_check_err++;
2482 break;
2483 case LANCER_TX_COMP_QINQ_ERR:
2484 tx_stats(txo)->tx_qinq_err++;
2485 break;
2486 case LANCER_TX_COMP_PARITY_ERR:
2487 tx_stats(txo)->tx_internal_parity_err++;
2488 break;
2489 case LANCER_TX_COMP_DMA_ERR:
2490 tx_stats(txo)->tx_dma_err++;
2491 break;
2492 }
2493}
2494
c8f64615
SP
2495static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2496 int idx)
6b7c5b94 2497{
6b7c5b94 2498 struct be_eth_tx_compl *txcp;
c8f64615 2499 int num_wrbs = 0, work_done = 0;
512bb8a2 2500 u32 compl_status;
c8f64615
SP
2501 u16 last_idx;
2502
2503 while ((txcp = be_tx_compl_get(&txo->cq))) {
2504 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2505 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2506 work_done++;
3c8def97 2507
512bb8a2
KA
2508 compl_status = GET_TX_COMPL_BITS(status, txcp);
2509 if (compl_status) {
2510 if (lancer_chip(adapter))
2511 lancer_update_tx_err(txo, compl_status);
2512 else
2513 be_update_tx_err(txo, compl_status);
2514 }
10ef9ab4 2515 }
6b7c5b94 2516
10ef9ab4
SP
2517 if (work_done) {
2518 be_cq_notify(adapter, txo->cq.id, true, work_done);
2519 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2520
10ef9ab4
SP
2521 /* As Tx wrbs have been freed up, wake up netdev queue
2522 * if it was stopped due to lack of tx wrbs. */
2523 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2524 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2525 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2526 }
10ef9ab4
SP
2527
2528 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2529 tx_stats(txo)->tx_compl += work_done;
2530 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2531 }
10ef9ab4 2532}
6b7c5b94 2533
f7062ee5
SP
2534#ifdef CONFIG_NET_RX_BUSY_POLL
2535static inline bool be_lock_napi(struct be_eq_obj *eqo)
2536{
2537 bool status = true;
2538
2539 spin_lock(&eqo->lock); /* BH is already disabled */
2540 if (eqo->state & BE_EQ_LOCKED) {
2541 WARN_ON(eqo->state & BE_EQ_NAPI);
2542 eqo->state |= BE_EQ_NAPI_YIELD;
2543 status = false;
2544 } else {
2545 eqo->state = BE_EQ_NAPI;
2546 }
2547 spin_unlock(&eqo->lock);
2548 return status;
2549}
2550
2551static inline void be_unlock_napi(struct be_eq_obj *eqo)
2552{
2553 spin_lock(&eqo->lock); /* BH is already disabled */
2554
2555 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2556 eqo->state = BE_EQ_IDLE;
2557
2558 spin_unlock(&eqo->lock);
2559}
2560
2561static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2562{
2563 bool status = true;
2564
2565 spin_lock_bh(&eqo->lock);
2566 if (eqo->state & BE_EQ_LOCKED) {
2567 eqo->state |= BE_EQ_POLL_YIELD;
2568 status = false;
2569 } else {
2570 eqo->state |= BE_EQ_POLL;
2571 }
2572 spin_unlock_bh(&eqo->lock);
2573 return status;
2574}
2575
2576static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2577{
2578 spin_lock_bh(&eqo->lock);
2579
2580 WARN_ON(eqo->state & (BE_EQ_NAPI));
2581 eqo->state = BE_EQ_IDLE;
2582
2583 spin_unlock_bh(&eqo->lock);
2584}
2585
2586static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2587{
2588 spin_lock_init(&eqo->lock);
2589 eqo->state = BE_EQ_IDLE;
2590}
2591
2592static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2593{
2594 local_bh_disable();
2595
2596 /* It's enough to just acquire napi lock on the eqo to stop
2597 * be_busy_poll() from processing any queues.
2598 */
2599 while (!be_lock_napi(eqo))
2600 mdelay(1);
2601
2602 local_bh_enable();
2603}
2604
2605#else /* CONFIG_NET_RX_BUSY_POLL */
2606
2607static inline bool be_lock_napi(struct be_eq_obj *eqo)
2608{
2609 return true;
2610}
2611
2612static inline void be_unlock_napi(struct be_eq_obj *eqo)
2613{
2614}
2615
2616static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2617{
2618 return false;
2619}
2620
2621static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2622{
2623}
2624
2625static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2626{
2627}
2628
2629static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2630{
2631}
2632#endif /* CONFIG_NET_RX_BUSY_POLL */
2633
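/* Illustrative note (not part of the driver): reading the helpers above,
 * eqo->state implements a small mutual-exclusion state machine between
 * the napi path and the busy-poll path:
 *
 *	IDLE --be_lock_napi()------> NAPI  (busy-poll attempts now fail)
 *	IDLE --be_lock_busy_poll()-> POLL  (napi attempts now fail)
 *	NAPI + busy-poll attempt -> NAPI|POLL_YIELD, caller gets "busy"
 *	POLL + napi attempt      -> POLL|NAPI_YIELD, be_poll() consumes
 *	                            its whole budget and is rescheduled
 *
 * and each unlock helper returns the state to IDLE.
 */
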
68d7bdcb 2634int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2635{
2636 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2637 struct be_adapter *adapter = eqo->adapter;
0b545a62 2638 int max_work = 0, work, i, num_evts;
6384a4d0 2639 struct be_rx_obj *rxo;
a4906ea0 2640 struct be_tx_obj *txo;
f31e50a8 2641
0b545a62
SP
2642 num_evts = events_get(eqo);
2643
a4906ea0
SP
2644 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2645 be_process_tx(adapter, txo, i);
f31e50a8 2646
6384a4d0
SP
2647 if (be_lock_napi(eqo)) {
2648 /* This loop will iterate twice for EQ0 in which
2649 * completions of the last RXQ (default one) are also processed
2650 * For other EQs the loop iterates only once
2651 */
2652 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2653 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2654 max_work = max(work, max_work);
2655 }
2656 be_unlock_napi(eqo);
2657 } else {
2658 max_work = budget;
10ef9ab4 2659 }
6b7c5b94 2660
10ef9ab4
SP
2661 if (is_mcc_eqo(eqo))
2662 be_process_mcc(adapter);
93c86700 2663
10ef9ab4
SP
2664 if (max_work < budget) {
2665 napi_complete(napi);
0b545a62 2666 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2667 } else {
2668 /* As we'll continue in polling mode, count and clear events */
0b545a62 2669 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2670 }
10ef9ab4 2671 return max_work;
6b7c5b94
SP
2672}
2673
6384a4d0
SP
2674#ifdef CONFIG_NET_RX_BUSY_POLL
2675static int be_busy_poll(struct napi_struct *napi)
2676{
2677 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2678 struct be_adapter *adapter = eqo->adapter;
2679 struct be_rx_obj *rxo;
2680 int i, work = 0;
2681
2682 if (!be_lock_busy_poll(eqo))
2683 return LL_FLUSH_BUSY;
2684
2685 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2686 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2687 if (work)
2688 break;
2689 }
2690
2691 be_unlock_busy_poll(eqo);
2692 return work;
2693}
2694#endif
2695
f67ef7ba 2696void be_detect_error(struct be_adapter *adapter)
7c185276 2697{
e1cfb67a
PR
2698 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2699 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2700 u32 i;
eb0eecc1
SK
2701 bool error_detected = false;
2702 struct device *dev = &adapter->pdev->dev;
2703 struct net_device *netdev = adapter->netdev;
7c185276 2704
d23e946c 2705 if (be_hw_error(adapter))
72f02485
SP
2706 return;
2707
e1cfb67a
PR
2708 if (lancer_chip(adapter)) {
2709 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2710 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2711 sliport_err1 = ioread32(adapter->db +
748b539a 2712 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2713 sliport_err2 = ioread32(adapter->db +
748b539a 2714 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2715 adapter->hw_error = true;
2716 /* Do not log error messages if it's a FW reset */
2717 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2718 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2719 dev_info(dev, "Firmware update in progress\n");
2720 } else {
2721 error_detected = true;
2722 dev_err(dev, "Error detected in the card\n");
2723 dev_err(dev, "ERR: sliport status 0x%x\n",
2724 sliport_status);
2725 dev_err(dev, "ERR: sliport error1 0x%x\n",
2726 sliport_err1);
2727 dev_err(dev, "ERR: sliport error2 0x%x\n",
2728 sliport_err2);
2729 }
e1cfb67a
PR
2730 }
2731 } else {
2732 pci_read_config_dword(adapter->pdev,
748b539a 2733 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2734 pci_read_config_dword(adapter->pdev,
748b539a 2735 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2736 pci_read_config_dword(adapter->pdev,
748b539a 2737 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2738 pci_read_config_dword(adapter->pdev,
748b539a 2739 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2740
f67ef7ba
PR
2741 ue_lo = (ue_lo & ~ue_lo_mask);
2742 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2743
eb0eecc1
SK
2744 /* On certain platforms BE hardware can indicate spurious UEs.
2745 * Allow HW to stop working completely in case of a real UE.
2746 * Hence not setting the hw_error for UE detection.
2747 */
f67ef7ba 2748
eb0eecc1
SK
2749 if (ue_lo || ue_hi) {
2750 error_detected = true;
2751 dev_err(dev,
2752 "Unrecoverable Error detected in the adapter");
2753 dev_err(dev, "Please reboot server to recover");
2754 if (skyhawk_chip(adapter))
2755 adapter->hw_error = true;
2756 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2757 if (ue_lo & 1)
2758 dev_err(dev, "UE: %s bit set\n",
2759 ue_status_low_desc[i]);
2760 }
2761 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2762 if (ue_hi & 1)
2763 dev_err(dev, "UE: %s bit set\n",
2764 ue_status_hi_desc[i]);
2765 }
7c185276
AK
2766 }
2767 }
eb0eecc1
SK
2768 if (error_detected)
2769 netif_carrier_off(netdev);
7c185276
AK
2770}
2771
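/* Illustrative sketch (not part of the driver): the UE reporting loops
 * above are a standard walk over an unmasked status word that names every
 * set bit. The same walk in isolation (hypothetical helper name):
 */
static void be_example_decode_ue(struct device *dev, u32 ue,
				 const char * const *desc)
{
	int i;

	for (i = 0; ue; ue >>= 1, i++)	/* shift until no set bits remain */
		if (ue & 1)
			dev_err(dev, "UE: %s bit set\n", desc[i]);
}
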
8d56ff11
SP
2772static void be_msix_disable(struct be_adapter *adapter)
2773{
ac6a0c4a 2774 if (msix_enabled(adapter)) {
8d56ff11 2775 pci_disable_msix(adapter->pdev);
ac6a0c4a 2776 adapter->num_msix_vec = 0;
68d7bdcb 2777 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2778 }
2779}
2780
c2bba3df 2781static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2782{
7dc4c064 2783 int i, num_vec;
d379142b 2784 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2785
92bf14ab
SP
2786 /* If RoCE is supported, program the max number of NIC vectors that
2787 * may be configured via set-channels, along with vectors needed for
2788 * RoCE. Else, just program the number we'll use initially.
2789 */
2790 if (be_roce_supported(adapter))
2791 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2792 2 * num_online_cpus());
2793 else
2794 num_vec = adapter->cfg_num_qs;
3abcdeda 2795
ac6a0c4a 2796 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2797 adapter->msix_entries[i].entry = i;
2798
7dc4c064
AG
2799 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2800 MIN_MSIX_VECTORS, num_vec);
2801 if (num_vec < 0)
2802 goto fail;
92bf14ab 2803
92bf14ab
SP
2804 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2805 adapter->num_msix_roce_vec = num_vec / 2;
2806 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2807 adapter->num_msix_roce_vec);
2808 }
2809
2810 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2811
2812 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2813 adapter->num_msix_vec);
c2bba3df 2814 return 0;
7dc4c064
AG
2815
2816fail:
2817 dev_warn(dev, "MSIx enable failed\n");
2818
2819 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2820 if (!be_physfn(adapter))
2821 return num_vec;
2822 return 0;
6b7c5b94
SP
2823}
2824
fe6d2a38 2825static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2826 struct be_eq_obj *eqo)
b628bde2 2827{
f2f781a7 2828 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2829}
6b7c5b94 2830
b628bde2
SP
2831static int be_msix_register(struct be_adapter *adapter)
2832{
10ef9ab4
SP
2833 struct net_device *netdev = adapter->netdev;
2834 struct be_eq_obj *eqo;
2835 int status, i, vec;
6b7c5b94 2836
10ef9ab4
SP
2837 for_all_evt_queues(adapter, eqo, i) {
2838 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2839 vec = be_msix_vec_get(adapter, eqo);
2840 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2841 if (status)
2842 goto err_msix;
2843 }
b628bde2 2844
6b7c5b94 2845 return 0;
3abcdeda 2846err_msix:
10ef9ab4
SP
2847 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2848 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2849 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2850 status);
ac6a0c4a 2851 be_msix_disable(adapter);
6b7c5b94
SP
2852 return status;
2853}
2854
2855static int be_irq_register(struct be_adapter *adapter)
2856{
2857 struct net_device *netdev = adapter->netdev;
2858 int status;
2859
ac6a0c4a 2860 if (msix_enabled(adapter)) {
6b7c5b94
SP
2861 status = be_msix_register(adapter);
2862 if (status == 0)
2863 goto done;
ba343c77
SB
2864 /* INTx is not supported for VF */
2865 if (!be_physfn(adapter))
2866 return status;
6b7c5b94
SP
2867 }
2868
e49cc34f 2869 /* INTx: only the first EQ is used */
6b7c5b94
SP
2870 netdev->irq = adapter->pdev->irq;
2871 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2872 &adapter->eq_obj[0]);
6b7c5b94
SP
2873 if (status) {
2874 dev_err(&adapter->pdev->dev,
2875 "INTx request IRQ failed - err %d\n", status);
2876 return status;
2877 }
2878done:
2879 adapter->isr_registered = true;
2880 return 0;
2881}
2882
2883static void be_irq_unregister(struct be_adapter *adapter)
2884{
2885 struct net_device *netdev = adapter->netdev;
10ef9ab4 2886 struct be_eq_obj *eqo;
3abcdeda 2887 int i;
6b7c5b94
SP
2888
2889 if (!adapter->isr_registered)
2890 return;
2891
2892 /* INTx */
ac6a0c4a 2893 if (!msix_enabled(adapter)) {
e49cc34f 2894 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2895 goto done;
2896 }
2897
2898 /* MSIx */
10ef9ab4
SP
2899 for_all_evt_queues(adapter, eqo, i)
2900 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2901
6b7c5b94
SP
2902done:
2903 adapter->isr_registered = false;
6b7c5b94
SP
2904}
2905
10ef9ab4 2906static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2907{
2908 struct be_queue_info *q;
2909 struct be_rx_obj *rxo;
2910 int i;
2911
2912 for_all_rx_queues(adapter, rxo, i) {
2913 q = &rxo->q;
2914 if (q->created) {
2915 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2916 be_rx_cq_clean(rxo);
482c9e79 2917 }
10ef9ab4 2918 be_queue_free(adapter, q);
482c9e79
SP
2919 }
2920}
2921
889cd4b2
SP
2922static int be_close(struct net_device *netdev)
2923{
2924 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2925 struct be_eq_obj *eqo;
2926 int i;
889cd4b2 2927
e1ad8e33
KA
2928 /* This protection is needed as be_close() may be called even when the
2929 * adapter is in a cleared state (after an EEH permanent failure)
2930 */
2931 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2932 return 0;
2933
045508a8
PP
2934 be_roce_dev_close(adapter);
2935
dff345c5
IV
2936 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2937 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2938 napi_disable(&eqo->napi);
6384a4d0
SP
2939 be_disable_busy_poll(eqo);
2940 }
71237b6f 2941 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2942 }
a323d9bf
SP
2943
2944 be_async_mcc_disable(adapter);
2945
2946 /* Wait for all pending tx completions to arrive so that
2947 * all tx skbs are freed.
2948 */
fba87559 2949 netif_tx_disable(netdev);
6e1f9975 2950 be_tx_compl_clean(adapter);
a323d9bf
SP
2951
2952 be_rx_qs_destroy(adapter);
2953
d11a347d
AK
2954 for (i = 1; i < (adapter->uc_macs + 1); i++)
2955 be_cmd_pmac_del(adapter, adapter->if_handle,
2956 adapter->pmac_id[i], 0);
2957 adapter->uc_macs = 0;
2958
a323d9bf 2959 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2960 if (msix_enabled(adapter))
2961 synchronize_irq(be_msix_vec_get(adapter, eqo));
2962 else
2963 synchronize_irq(netdev->irq);
2964 be_eq_clean(eqo);
63fcb27f
PR
2965 }
2966
889cd4b2
SP
2967 be_irq_unregister(adapter);
2968
482c9e79
SP
2969 return 0;
2970}
2971
10ef9ab4 2972static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 2973{
1dcf7b1c
ED
2974 struct rss_info *rss = &adapter->rss_info;
2975 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 2976 struct be_rx_obj *rxo;
e9008ee9 2977 int rc, i, j;
482c9e79
SP
2978
2979 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2980 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2981 sizeof(struct be_eth_rx_d));
2982 if (rc)
2983 return rc;
2984 }
2985
2986 /* The FW would like the default RXQ to be created first */
2987 rxo = default_rxo(adapter);
2988 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2989 adapter->if_handle, false, &rxo->rss_id);
2990 if (rc)
2991 return rc;
2992
2993 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2994 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2995 rx_frag_size, adapter->if_handle,
2996 true, &rxo->rss_id);
482c9e79
SP
2997 if (rc)
2998 return rc;
2999 }
3000
3001 if (be_multi_rxq(adapter)) {
e2557877
VD
3002 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3003 j += adapter->num_rx_qs - 1) {
e9008ee9 3004 for_all_rss_queues(adapter, rxo, i) {
e2557877 3005 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3006 break;
e2557877
VD
3007 rss->rsstable[j + i] = rxo->rss_id;
3008 rss->rss_queue[j + i] = i;
e9008ee9
PR
3009 }
3010 }
e2557877
VD
3011 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3012 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3013
3014 if (!BEx_chip(adapter))
e2557877
VD
3015 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3016 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3017 } else {
3018 /* Disable RSS if only the default RX queue is created */
e2557877 3019 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3020 }
594ad54a 3021
1dcf7b1c 3022 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3023 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3024 128, rss_key);
da1388d6 3025 if (rc) {
e2557877 3026 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3027 return rc;
482c9e79
SP
3028 }
3029
1dcf7b1c 3030 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3031
482c9e79 3032 /* First time posting */
10ef9ab4 3033 for_all_rx_queues(adapter, rxo, i)
c30d7266 3034 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3035 return 0;
3036}
3037
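/* Illustrative sketch (not part of the driver): the nested loops above
 * fill the RSS indirection table by repeating the RSS ring ids in
 * round-robin. A flat equivalent, assuming ring i was assigned rss_id[i]
 * (array and helper names here are hypothetical):
 */
static void be_example_fill_rsstable(u8 *rsstable, const u8 *rss_id,
				     int num_rss_qs)
{
	int j;

	for (j = 0; j < RSS_INDIR_TABLE_LEN; j++)
		rsstable[j] = rss_id[j % num_rss_qs];	/* ring for bucket j */
}
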
6b7c5b94
SP
3038static int be_open(struct net_device *netdev)
3039{
3040 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3041 struct be_eq_obj *eqo;
3abcdeda 3042 struct be_rx_obj *rxo;
10ef9ab4 3043 struct be_tx_obj *txo;
b236916a 3044 u8 link_status;
3abcdeda 3045 int status, i;
5fb379ee 3046
10ef9ab4 3047 status = be_rx_qs_create(adapter);
482c9e79
SP
3048 if (status)
3049 goto err;
3050
c2bba3df
SK
3051 status = be_irq_register(adapter);
3052 if (status)
3053 goto err;
5fb379ee 3054
10ef9ab4 3055 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3056 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3057
10ef9ab4
SP
3058 for_all_tx_queues(adapter, txo, i)
3059 be_cq_notify(adapter, txo->cq.id, true, 0);
3060
7a1e9b20
SP
3061 be_async_mcc_enable(adapter);
3062
10ef9ab4
SP
3063 for_all_evt_queues(adapter, eqo, i) {
3064 napi_enable(&eqo->napi);
6384a4d0 3065 be_enable_busy_poll(eqo);
4cad9f3b 3066 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3067 }
04d3d624 3068 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3069
323ff71e 3070 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3071 if (!status)
3072 be_link_status_update(adapter, link_status);
3073
fba87559 3074 netif_tx_start_all_queues(netdev);
045508a8 3075 be_roce_dev_open(adapter);
c9c47142 3076
c5abe7c0 3077#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3078 if (skyhawk_chip(adapter))
3079 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3080#endif
3081
889cd4b2
SP
3082 return 0;
3083err:
3084 be_close(adapter->netdev);
3085 return -EIO;
5fb379ee
SP
3086}
3087
71d8d1b5
AK
3088static int be_setup_wol(struct be_adapter *adapter, bool enable)
3089{
3090 struct be_dma_mem cmd;
3091 int status = 0;
3092 u8 mac[ETH_ALEN];
3093
3094 memset(mac, 0, ETH_ALEN);
3095
3096 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3097 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3098 GFP_KERNEL);
ddf1169f 3099 if (!cmd.va)
6b568689 3100 return -ENOMEM;
71d8d1b5
AK
3101
3102 if (enable) {
3103 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3104 PCICFG_PM_CONTROL_OFFSET,
3105 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3106 if (status) {
3107 dev_err(&adapter->pdev->dev,
2381a55c 3108 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3109 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3110 cmd.dma);
71d8d1b5
AK
3111 return status;
3112 }
3113 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3114 adapter->netdev->dev_addr,
3115 &cmd);
71d8d1b5
AK
3116 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3117 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3118 } else {
3119 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3120 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3121 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3122 }
3123
2b7bcebf 3124 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3125 return status;
3126}
3127
f7062ee5
SP
3128static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3129{
3130 u32 addr;
3131
3132 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3133
3134 mac[5] = (u8)(addr & 0xFF);
3135 mac[4] = (u8)((addr >> 8) & 0xFF);
3136 mac[3] = (u8)((addr >> 16) & 0xFF);
3137 /* Use the OUI from the current MAC address */
3138 memcpy(mac, adapter->netdev->dev_addr, 3);
3139}
3140
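/* Illustrative note (not part of the driver): the generator above keeps
 * the PF's 3-byte vendor OUI and overwrites the NIC-specific half with a
 * jhash of the PF MAC. For a hash value 'addr' the resulting layout is:
 *
 *	mac[0..2] = PF OUI          (the memcpy runs last, so it wins)
 *	mac[3]    = (addr >> 16) & 0xFF
 *	mac[4]    = (addr >>  8) & 0xFF
 *	mac[5]    =  addr        & 0xFF
 *
 * be_vf_eth_addr_config() then hands VF n this seed with mac[5] bumped
 * by n.
 */
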
6d87f5c3
AK
3141/*
3142 * Generate a seed MAC address from the PF MAC Address using jhash.
3143 * MAC addresses for VFs are assigned incrementally, starting from the seed.
3144 * These addresses are programmed in the ASIC by the PF and the VF driver
3145 * queries for the MAC address during its probe.
3146 */
4c876616 3147static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3148{
f9449ab7 3149 u32 vf;
3abcdeda 3150 int status = 0;
6d87f5c3 3151 u8 mac[ETH_ALEN];
11ac75ed 3152 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3153
3154 be_vf_eth_addr_generate(adapter, mac);
3155
11ac75ed 3156 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3157 if (BEx_chip(adapter))
590c391d 3158 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3159 vf_cfg->if_handle,
3160 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3161 else
3162 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3163 vf + 1);
590c391d 3164
6d87f5c3
AK
3165 if (status)
3166 dev_err(&adapter->pdev->dev,
748b539a
SP
3167 "Mac address assignment failed for VF %d\n",
3168 vf);
6d87f5c3 3169 else
11ac75ed 3170 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3171
3172 mac[5] += 1;
3173 }
3174 return status;
3175}
3176
4c876616
SP
3177static int be_vfs_mac_query(struct be_adapter *adapter)
3178{
3179 int status, vf;
3180 u8 mac[ETH_ALEN];
3181 struct be_vf_cfg *vf_cfg;
4c876616
SP
3182
3183 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3184 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3185 mac, vf_cfg->if_handle,
3186 false, vf+1);
4c876616
SP
3187 if (status)
3188 return status;
3189 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3190 }
3191 return 0;
3192}
3193
f9449ab7 3194static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3195{
11ac75ed 3196 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3197 u32 vf;
3198
257a3feb 3199 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3200 dev_warn(&adapter->pdev->dev,
3201 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3202 goto done;
3203 }
3204
b4c1df93
SP
3205 pci_disable_sriov(adapter->pdev);
3206
11ac75ed 3207 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3208 if (BEx_chip(adapter))
11ac75ed
SP
3209 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3210 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3211 else
3212 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3213 vf + 1);
f9449ab7 3214
11ac75ed
SP
3215 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3216 }
39f1d94d
SP
3217done:
3218 kfree(adapter->vf_cfg);
3219 adapter->num_vfs = 0;
f174c7ec 3220 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3221}
3222
7707133c
SP
3223static void be_clear_queues(struct be_adapter *adapter)
3224{
3225 be_mcc_queues_destroy(adapter);
3226 be_rx_cqs_destroy(adapter);
3227 be_tx_queues_destroy(adapter);
3228 be_evt_queues_destroy(adapter);
3229}
3230
68d7bdcb 3231static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3232{
191eb756
SP
3233 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3234 cancel_delayed_work_sync(&adapter->work);
3235 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3236 }
68d7bdcb
SP
3237}
3238
b05004ad 3239static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3240{
3241 int i;
3242
b05004ad
SK
3243 if (adapter->pmac_id) {
3244 for (i = 0; i < (adapter->uc_macs + 1); i++)
3245 be_cmd_pmac_del(adapter, adapter->if_handle,
3246 adapter->pmac_id[i], 0);
3247 adapter->uc_macs = 0;
3248
3249 kfree(adapter->pmac_id);
3250 adapter->pmac_id = NULL;
3251 }
3252}
3253
c5abe7c0 3254#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3255static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3256{
630f4b70
SB
3257 struct net_device *netdev = adapter->netdev;
3258
c9c47142
SP
3259 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3260 be_cmd_manage_iface(adapter, adapter->if_handle,
3261 OP_CONVERT_TUNNEL_TO_NORMAL);
3262
3263 if (adapter->vxlan_port)
3264 be_cmd_set_vxlan_port(adapter, 0);
3265
3266 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3267 adapter->vxlan_port = 0;
630f4b70
SB
3268
3269 netdev->hw_enc_features = 0;
3270 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3271 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3272}
c5abe7c0 3273#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;
	int status;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS;

	en_flags &= cap_flags;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  if_handle, vf);

	return status;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3441
f93f160b
VV
3442/* Converting function_mode bits on BE3 to SH mc_type enums */
3443
3444static u8 be_convert_mc_type(u32 function_mode)
3445{
66064dbc 3446 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3447 return vNIC1;
66064dbc 3448 else if (function_mode & QNQ_MODE)
f93f160b
VV
3449 return FLEX10;
3450 else if (function_mode & VNIC_MODE)
3451 return vNIC2;
3452 else if (function_mode & UMC_ENABLED)
3453 return UMC;
3454 else
3455 return MC_NONE;
3456}
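
/* Note on be_convert_mc_type() above: the checks are ordered so that a
 * function_mode with both VNIC_MODE and QNQ_MODE set maps to vNIC1, even
 * though QNQ_MODE on its own would map to FLEX10.
 */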

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF has access to a greater share of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
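
/* Example: fw_major_num("3.102.148.0") parses the leading integer and
 * returns 3; if the version string does not begin with a number, sscanf()
 * matches nothing and the function returns 0.
 */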

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
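
/* get_fsec_info() scans past the file and image headers in 32-byte steps,
 * looking for the 32-byte flash-directory cookie (the pattern held in
 * flash_cookie[] above) that marks the start of the section table.
 */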

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
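
/* In this layout each image carries its CRC in its last 4 bytes:
 * be_check_flash_crc() asks the adapter for the CRC of the region already
 * on flash (computed over img_size - 4 bytes) and compares it against the
 * CRC stored at the end of the image in the file, so unchanged sections
 * can be skipped.
 */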

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
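
/* be_flash() example: a 72KB image is written as two 32KB chunks with the
 * SAVE opcode followed by a final 8KB chunk with the FLASH opcode; only
 * the last chunk triggers the actual flash operation.
 */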

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}
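
/* A section-entry optype of 0xFFFF indicates an older UFI format that did
 * not carry explicit op-types, so be_get_img_optype() falls back to
 * deriving the op-type from the image type.
 */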

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
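
/* lancer_fw_download() streams the image to the "/prg" object in 32KB
 * chunks via lancer_cmd_write_object(); the final zero-length write at the
 * accumulated offset commits the downloaded firmware.
 */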

#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
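
/* UFI compatibility summary: build[0] '4' on Skyhawk -> UFI_TYPE4;
 * build[0] '3' on BE3 -> UFI_TYPE3R when asic_type_rev is 0x10 (BE3-R),
 * otherwise UFI_TYPE3; build[0] '2' on BE2 -> UFI_TYPE2. Any other
 * combination is rejected as incompatible.
 */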

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic, like GRE, to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
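
/* The header-length check above accepts only the standard VxLAN
 * encapsulation: sizeof(struct udphdr) + sizeof(struct vxlanhdr) is
 * 8 + 8 = 16 bytes between the transport header and the inner MAC header.
 */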
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}
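
/* db_bar() picks the PCI BAR that holds the doorbell registers: BAR 0 on
 * Lancer and on virtual functions, BAR 4 for the physical function
 * otherwise.
 */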

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
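
/* be_worker() reschedules itself every second; with be_get_temp_freq set
 * to 64 in be_get_initial_config(), the MODULO() check above queries the
 * die temperature roughly once every 64 seconds on the PF.
 */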

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}
5036
d379142b
SP
5037static char *mc_name(struct be_adapter *adapter)
5038{
f93f160b
VV
5039 char *str = ""; /* default */
5040
5041 switch (adapter->mc_type) {
5042 case UMC:
5043 str = "UMC";
5044 break;
5045 case FLEX10:
5046 str = "FLEX10";
5047 break;
5048 case vNIC1:
5049 str = "vNIC-1";
5050 break;
5051 case nPAR:
5052 str = "nPAR";
5053 break;
5054 case UFP:
5055 str = "UFP";
5056 break;
5057 case vNIC2:
5058 str = "vNIC-2";
5059 break;
5060 default:
5061 str = "";
5062 }
5063
5064 return str;
d379142b
SP
5065}
5066
5067static inline char *func_name(struct be_adapter *adapter)
5068{
5069 return be_physfn(adapter) ? "PF" : "VF";
5070}
5071
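/* Map the PCI device ID to the chip family name logged at probe time */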
f7062ee5
SP
5072static inline char *nic_name(struct pci_dev *pdev)
5073{
5074 switch (pdev->device) {
5075 case OC_DEVICE_ID1:
5076 return OC_NAME;
5077 case OC_DEVICE_ID2:
5078 return OC_NAME_BE;
5079 case OC_DEVICE_ID3:
5080 case OC_DEVICE_ID4:
5081 return OC_NAME_LANCER;
5082 case BE_DEVICE_ID2:
5083 return BE3_NAME;
5084 case OC_DEVICE_ID5:
5085 case OC_DEVICE_ID6:
5086 return OC_NAME_SH;
5087 default:
5088 return BE_NAME;
5089 }
5090}
5091
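/* PCI probe: enable the device, set the DMA mask, bring up the mailbox
 * and stats DMA (be_ctrl_init/be_stats_init), sync with FW readiness,
 * optionally FLR the function, then run be_setup() and register the
 * netdev. Failures unwind in reverse order through the labels below.
 */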
1dd06ae8 5092static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
5093{
5094 int status = 0;
5095 struct be_adapter *adapter;
5096 struct net_device *netdev;
b4e32a71 5097 char port_name;
6b7c5b94 5098
acbafeb1
SP
5099 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5100
6b7c5b94
SP
5101 status = pci_enable_device(pdev);
5102 if (status)
5103 goto do_none;
5104
5105 status = pci_request_regions(pdev, DRV_NAME);
5106 if (status)
5107 goto disable_dev;
5108 pci_set_master(pdev);
5109
7f640062 5110 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5111 if (!netdev) {
6b7c5b94
SP
5112 status = -ENOMEM;
5113 goto rel_reg;
5114 }
5115 adapter = netdev_priv(netdev);
5116 adapter->pdev = pdev;
5117 pci_set_drvdata(pdev, adapter);
5118 adapter->netdev = netdev;
2243e2e9 5119 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5120
4c15c243 5121 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
5122 if (!status) {
5123 netdev->features |= NETIF_F_HIGHDMA;
5124 } else {
4c15c243 5125 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
5126 if (status) {
5127 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5128 goto free_netdev;
5129 }
5130 }
5131
2f951a9a
KA
5132 status = pci_enable_pcie_error_reporting(pdev);
5133 if (!status)
5134 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5135
6b7c5b94
SP
5136 status = be_ctrl_init(adapter);
5137 if (status)
39f1d94d 5138 goto free_netdev;
6b7c5b94 5139
2243e2e9 5140 /* sync up with fw's ready state */
ba343c77 5141 if (be_physfn(adapter)) {
bf99e50d 5142 status = be_fw_wait_ready(adapter);
ba343c77
SB
5143 if (status)
5144 goto ctrl_clean;
ba343c77 5145 }
6b7c5b94 5146
39f1d94d
SP
5147 if (be_reset_required(adapter)) {
5148 status = be_cmd_reset_function(adapter);
5149 if (status)
5150 goto ctrl_clean;
556ae191 5151
2d177be8
KA
5152 /* Wait for interrupts to quiesce after an FLR */
5153 msleep(100);
5154 }
8cef7a78
SK
5155
 5156 /* Allow interrupts for other ULPs running on the NIC function */
5157 be_intr_set(adapter, true);
10ef9ab4 5158
2d177be8
KA
5159 /* tell fw we're ready to fire cmds */
5160 status = be_cmd_fw_init(adapter);
5161 if (status)
5162 goto ctrl_clean;
5163
2243e2e9
SP
5164 status = be_stats_init(adapter);
5165 if (status)
5166 goto ctrl_clean;
5167
39f1d94d 5168 status = be_get_initial_config(adapter);
6b7c5b94
SP
5169 if (status)
5170 goto stats_clean;
6b7c5b94
SP
5171
5172 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 5173 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
5f820b6c
KA
5174 adapter->rx_fc = true;
5175 adapter->tx_fc = true;
6b7c5b94 5176
5fb379ee
SP
5177 status = be_setup(adapter);
5178 if (status)
55f5c3c5 5179 goto stats_clean;
2243e2e9 5180
3abcdeda 5181 be_netdev_init(netdev);
6b7c5b94
SP
5182 status = register_netdev(netdev);
5183 if (status != 0)
5fb379ee 5184 goto unsetup;
6b7c5b94 5185
045508a8
PP
5186 be_roce_dev_add(adapter);
5187
f67ef7ba
PR
5188 schedule_delayed_work(&adapter->func_recovery_work,
5189 msecs_to_jiffies(1000));
b4e32a71
PR
5190
5191 be_cmd_query_port_name(adapter, &port_name);
5192
d379142b
SP
5193 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5194 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 5195
6b7c5b94
SP
5196 return 0;
5197
5fb379ee
SP
5198unsetup:
5199 be_clear(adapter);
6b7c5b94
SP
5200stats_clean:
5201 be_stats_cleanup(adapter);
5202ctrl_clean:
5203 be_ctrl_cleanup(adapter);
f9449ab7 5204free_netdev:
fe6d2a38 5205 free_netdev(netdev);
6b7c5b94
SP
5206rel_reg:
5207 pci_release_regions(pdev);
5208disable_dev:
5209 pci_disable_device(pdev);
5210do_none:
c4ca2374 5211 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5212 return status;
5213}
5214
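/* PM suspend: arm wake-on-LAN if enabled, stop the recovery task,
 * detach and close the netdev, free HW resources and enter the
 * requested low-power state.
 */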
5215static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5216{
5217 struct be_adapter *adapter = pci_get_drvdata(pdev);
5218 struct net_device *netdev = adapter->netdev;
5219
76a9e08e 5220 if (adapter->wol_en)
71d8d1b5
AK
5221 be_setup_wol(adapter, true);
5222
d4360d6f 5223 be_intr_set(adapter, false);
f67ef7ba
PR
5224 cancel_delayed_work_sync(&adapter->func_recovery_work);
5225
6b7c5b94
SP
5226 netif_device_detach(netdev);
5227 if (netif_running(netdev)) {
5228 rtnl_lock();
5229 be_close(netdev);
5230 rtnl_unlock();
5231 }
9b0365f1 5232 be_clear(adapter);
6b7c5b94
SP
5233
5234 pci_save_state(pdev);
5235 pci_disable_device(pdev);
5236 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5237 return 0;
5238}
5239
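/* PM resume: re-enable the device, wait for FW readiness, reset the
 * function, then redo be_setup()/be_open() before restarting the
 * recovery task and re-attaching the netdev.
 */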
5240static int be_resume(struct pci_dev *pdev)
5241{
5242 int status = 0;
5243 struct be_adapter *adapter = pci_get_drvdata(pdev);
5244 struct net_device *netdev = adapter->netdev;
5245
5246 netif_device_detach(netdev);
5247
5248 status = pci_enable_device(pdev);
5249 if (status)
5250 return status;
5251
1ca01512 5252 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5253 pci_restore_state(pdev);
5254
dd5746bf
SB
5255 status = be_fw_wait_ready(adapter);
5256 if (status)
5257 return status;
5258
9a6d73d9
KA
5259 status = be_cmd_reset_function(adapter);
5260 if (status)
5261 return status;
5262
d4360d6f 5263 be_intr_set(adapter, true);
2243e2e9
SP
5264 /* tell fw we're ready to fire cmds */
5265 status = be_cmd_fw_init(adapter);
5266 if (status)
5267 return status;
5268
9b0365f1 5269 be_setup(adapter);
6b7c5b94
SP
5270 if (netif_running(netdev)) {
5271 rtnl_lock();
5272 be_open(netdev);
5273 rtnl_unlock();
5274 }
f67ef7ba
PR
5275
5276 schedule_delayed_work(&adapter->func_recovery_work,
5277 msecs_to_jiffies(1000));
6b7c5b94 5278 netif_device_attach(netdev);
71d8d1b5 5279
76a9e08e 5280 if (adapter->wol_en)
71d8d1b5 5281 be_setup_wol(adapter, false);
a4ca055f 5282
6b7c5b94
SP
5283 return 0;
5284}
5285
82456b03
SP
5286/*
5287 * An FLR (issued below via be_cmd_reset_function()) stops BE from DMAing any data.
5288 */
5289static void be_shutdown(struct pci_dev *pdev)
5290{
5291 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5292
2d5d4154
AK
5293 if (!adapter)
5294 return;
82456b03 5295
d114f99a 5296 be_roce_dev_shutdown(adapter);
0f4a6828 5297 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5298 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5299
2d5d4154 5300 netif_device_detach(adapter->netdev);
82456b03 5301
57841869
AK
5302 be_cmd_reset_function(adapter);
5303
82456b03 5304 pci_disable_device(pdev);
82456b03
SP
5305}
5306
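/* EEH error callback: on the first report, stop the recovery task,
 * detach and close the netdev and free HW resources, then tell the
 * EEH core whether a slot reset may recover the device.
 */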
cf588477 5307static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5308 pci_channel_state_t state)
cf588477
SP
5309{
5310 struct be_adapter *adapter = pci_get_drvdata(pdev);
5311 struct net_device *netdev = adapter->netdev;
5312
5313 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5314
01e5b2c4
SK
5315 if (!adapter->eeh_error) {
5316 adapter->eeh_error = true;
cf588477 5317
01e5b2c4 5318 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5319
cf588477 5320 rtnl_lock();
01e5b2c4
SK
5321 netif_device_detach(netdev);
5322 if (netif_running(netdev))
5323 be_close(netdev);
cf588477 5324 rtnl_unlock();
01e5b2c4
SK
5325
5326 be_clear(adapter);
cf588477 5327 }
cf588477
SP
5328
5329 if (state == pci_channel_io_perm_failure)
5330 return PCI_ERS_RESULT_DISCONNECT;
5331
5332 pci_disable_device(pdev);
5333
eeb7fc7b
SK
5334 /* The error could cause the FW to trigger a flash debug dump.
5335 * Resetting the card while flash dump is in progress
c8a54163
PR
5336 * can cause it not to recover; wait for it to finish.
5337 * Wait only for first function as it is needed only once per
5338 * adapter.
eeb7fc7b 5339 */
c8a54163
PR
5340 if (pdev->devfn == 0)
5341 ssleep(30);
5342
cf588477
SP
5343 return PCI_ERS_RESULT_NEED_RESET;
5344}
5345
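/* EEH slot-reset callback: re-enable the device, restore PCI state and
 * wait for the FW to become ready before reporting the slot recovered.
 */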
5346static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5347{
5348 struct be_adapter *adapter = pci_get_drvdata(pdev);
5349 int status;
5350
5351 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5352
5353 status = pci_enable_device(pdev);
5354 if (status)
5355 return PCI_ERS_RESULT_DISCONNECT;
5356
5357 pci_set_master(pdev);
1ca01512 5358 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5359 pci_restore_state(pdev);
5360
5361 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5362 dev_info(&adapter->pdev->dev,
5363 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5364 status = be_fw_wait_ready(adapter);
cf588477
SP
5365 if (status)
5366 return PCI_ERS_RESULT_DISCONNECT;
5367
d6b6d987 5368 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5369 be_clear_all_error(adapter);
cf588477
SP
5370 return PCI_ERS_RESULT_RECOVERED;
5371}
5372
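/* EEH resume callback: reset the function, re-init the FW and HW
 * resources, reopen the interface and restart the recovery task.
 */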
5373static void be_eeh_resume(struct pci_dev *pdev)
5374{
5375 int status = 0;
5376 struct be_adapter *adapter = pci_get_drvdata(pdev);
5377 struct net_device *netdev = adapter->netdev;
5378
5379 dev_info(&adapter->pdev->dev, "EEH resume\n");
5380
5381 pci_save_state(pdev);
5382
2d177be8 5383 status = be_cmd_reset_function(adapter);
cf588477
SP
5384 if (status)
5385 goto err;
5386
03a58baa
KA
5387 /* On some BE3 FW versions, after a HW reset,
5388 * interrupts will remain disabled for each function.
5389 * So, explicitly enable interrupts
5390 */
5391 be_intr_set(adapter, true);
5392
2d177be8
KA
5393 /* tell fw we're ready to fire cmds */
5394 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5395 if (status)
5396 goto err;
5397
cf588477
SP
5398 status = be_setup(adapter);
5399 if (status)
5400 goto err;
5401
5402 if (netif_running(netdev)) {
5403 status = be_open(netdev);
5404 if (status)
5405 goto err;
5406 }
f67ef7ba
PR
5407
5408 schedule_delayed_work(&adapter->func_recovery_work,
5409 msecs_to_jiffies(1000));
cf588477
SP
5410 netif_device_attach(netdev);
5411 return;
5412err:
5413 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5414}
5415
3646f0e5 5416static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5417 .error_detected = be_eeh_err_detected,
5418 .slot_reset = be_eeh_reset,
5419 .resume = be_eeh_resume,
5420};
5421
6b7c5b94
SP
5422static struct pci_driver be_driver = {
5423 .name = DRV_NAME,
5424 .id_table = be_dev_ids,
5425 .probe = be_probe,
5426 .remove = be_remove,
5427 .suspend = be_suspend,
cf588477 5428 .resume = be_resume,
82456b03 5429 .shutdown = be_shutdown,
cf588477 5430 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5431};
5432
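/* Module entry point: validates rx_frag_size (2048/4096/8192) before
 * registering the PCI driver. Hypothetical invocation:
 *   modprobe be2net rx_frag_size=4096
 */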
5433static int __init be_init_module(void)
5434{
8e95a202
JP
5435 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5436 rx_frag_size != 2048) {
6b7c5b94
SP
5437 printk(KERN_WARNING DRV_NAME
5438 " : Module param rx_frag_size must be 2048/4096/8192."
5439 " Using 2048\n");
5440 rx_frag_size = 2048;
5441 }
6b7c5b94
SP
5442
5443 return pci_register_driver(&be_driver);
5444}
5445module_init(be_init_module);
5446
5447static void __exit be_exit_module(void)
5448{
5449 pci_unregister_driver(&be_driver);
5450}
5451module_exit(be_exit_module);