be2net: refactor be_set_rx_mode() and be_vid_config() for readability
[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

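/* A minimal usage sketch for the two queue helpers above (illustrative
 * only; the ring length and entry type here are made-up values):
 *
 *	struct be_queue_info q;
 *
 *	if (be_queue_alloc(adapter, &q, 1024, sizeof(struct be_eth_wrb)))
 *		return -ENOMEM;
 *	...
 *	be_queue_free(adapter, &q);
 */

/* Interrupt control: be_intr_set() prefers the FW command path and falls
 * back to be_reg_intr_set(), which flips the host-interrupt enable bit in
 * the MEMBAR control register through PCI config space.
 */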
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

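/* The be_*_notify() helpers ring a doorbell register to tell the HW how
 * many entries were posted to (or popped from) a queue. The wmb() in the
 * posting helpers ensures the queue entries are visible in memory before
 * the doorbell write.
 */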
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd; BE3 uses v1; all later chips use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd; BE3 uses v1; all later chips use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

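/* The BEx ERX drop counter is only 16 bits wide and wraps at 65535.
 * accumulate_16bit_val() keeps a monotonically increasing 32-bit total:
 * the low half tracks the latest HW snapshot, and 64K is added whenever
 * the snapshot goes backwards, i.e. the HW counter wrapped.
 */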
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

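/* ndo_get_stats64 handler: totals the per-queue counters into @stats. The
 * u64_stats_fetch_begin_irq()/retry_irq() loops provide a consistent
 * 64-bit snapshot of each queue's counters, even on 32-bit hosts, without
 * blocking the datapath.
 */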
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

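/* A WRB (work request block) is a single descriptor on the TX ring;
 * wrb_fill() encodes one DMA fragment (bus address + length) into it.
 */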
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

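/* Fills the TX header WRB: checksum-offload, LSO, VLAN-insertion and WRB
 * count are all encoded here. For encapsulated (tunnelled) packets the
 * inner IP protocol selects the L4 checksum bits.
 */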
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

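/* Inserts the VLAN tag (and the outer QnQ tag, if one is configured) into
 * the packet data itself, for the cases where HW VLAN tagging must be
 * skipped. May return NULL if the skb cannot be reallocated.
 */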
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

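/* ndo_start_xmit handler. The skb is first passed through the HW
 * workarounds and then enqueued as WRBs. The doorbell is rung only when
 * xmit_more is not set or the queue was stopped, so bursts of packets can
 * be flushed with a single doorbell write.
 */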
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

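/* RX-filter helpers. adapter->if_flags caches the filter flags last
 * requested from the FW, so helpers such as be_set_vlan_promisc() can
 * skip issuing redundant be_cmd_rx_filter() commands.
 */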
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

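/* ndo_set_rx_mode handler, refactored into the small helpers above: enter
 * all-promisc if requested; otherwise drop out of it, fall back to
 * multicast promisc when the mc list exceeds what HW supports, and finally
 * program the uc and mc lists.
 */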
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

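/* Adaptive interrupt coalescing (AIC): be_eqd_update() below samples the
 * per-queue rx/tx packet counts and turns the delta since the last sample
 * into a packets-per-second estimate, roughly:
 *
 *	pps = (rx_delta + tx_delta) * 1000 / delta_ms;
 *	eqd = (pps / 15000) << 2;	(forced to 0 below a small threshold)
 *
 * eqd is clamped to [aic->min_eqd, aic->max_eqd], and only EQs whose delay
 * actually changed are reprogrammed via be_cmd_modify_eqd().
 */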
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

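/* Pops the page-info entry at the RX queue tail. The fragment's DMA mapping
 * is torn down only when this is the last fragment carved out of the
 * compound page; otherwise the fragment is merely synced for CPU access.
 */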
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

1621/* Throw away the data in the Rx completion */
10ef9ab4
SP
1622static void be_rx_compl_discard(struct be_rx_obj *rxo,
1623 struct be_rx_compl_info *rxcp)
6b7c5b94 1624{
6b7c5b94 1625 struct be_rx_page_info *page_info;
2e588f84 1626 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1627
e80d9da6 1628 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1629 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1630 put_page(page_info->page);
1631 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1632 }
1633}
1634
1635/*
1636 * skb_fill_rx_data forms a complete skb for an Ethernet frame
1637 * indicated by rxcp.
1638 */
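/* Sketch of the resulting layout (descriptive note): packets no larger
 * than BE_HDR_LEN are copied entirely into the skb linear area; for
 * larger packets only the Ethernet header is copied and the remaining
 * data is attached as page fragments, with frags from the same physical
 * page coalesced into a single frag slot.
 */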
10ef9ab4
SP
1639static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1640 struct be_rx_compl_info *rxcp)
6b7c5b94 1641{
6b7c5b94 1642 struct be_rx_page_info *page_info;
2e588f84
SP
1643 u16 i, j;
1644 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1645 u8 *start;
6b7c5b94 1646
0b0ef1d0 1647 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1648 start = page_address(page_info->page) + page_info->page_offset;
1649 prefetch(start);
1650
1651 /* Copy data in the first descriptor of this completion */
2e588f84 1652 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1653
6b7c5b94
SP
1654 skb->len = curr_frag_len;
1655 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1656 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1657 /* Complete packet has now been moved to data */
1658 put_page(page_info->page);
1659 skb->data_len = 0;
1660 skb->tail += curr_frag_len;
1661 } else {
ac1ae5f3
ED
1662 hdr_len = ETH_HLEN;
1663 memcpy(skb->data, start, hdr_len);
6b7c5b94 1664 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1665 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1666 skb_shinfo(skb)->frags[0].page_offset =
1667 page_info->page_offset + hdr_len;
748b539a
SP
1668 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1669 curr_frag_len - hdr_len);
6b7c5b94 1670 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1671 skb->truesize += rx_frag_size;
6b7c5b94
SP
1672 skb->tail += hdr_len;
1673 }
205859a2 1674 page_info->page = NULL;
6b7c5b94 1675
2e588f84
SP
1676 if (rxcp->pkt_size <= rx_frag_size) {
1677 BUG_ON(rxcp->num_rcvd != 1);
1678 return;
6b7c5b94
SP
1679 }
1680
1681 /* More frags present for this completion */
2e588f84
SP
1682 remaining = rxcp->pkt_size - curr_frag_len;
1683 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1684 page_info = get_rx_page_info(rxo);
2e588f84 1685 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1686
bd46cb6c
AK
1687 /* Coalesce all frags from the same physical page in one slot */
1688 if (page_info->page_offset == 0) {
1689 /* Fresh page */
1690 j++;
b061b39e 1691 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1692 skb_shinfo(skb)->frags[j].page_offset =
1693 page_info->page_offset;
9e903e08 1694 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1695 skb_shinfo(skb)->nr_frags++;
1696 } else {
1697 put_page(page_info->page);
1698 }
1699
9e903e08 1700 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1701 skb->len += curr_frag_len;
1702 skb->data_len += curr_frag_len;
bdb28a97 1703 skb->truesize += rx_frag_size;
2e588f84 1704 remaining -= curr_frag_len;
205859a2 1705 page_info->page = NULL;
6b7c5b94 1706 }
bd46cb6c 1707 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1708}
1709
5be93b9a 1710/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1711static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1712 struct be_rx_compl_info *rxcp)
6b7c5b94 1713{
10ef9ab4 1714 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1715 struct net_device *netdev = adapter->netdev;
6b7c5b94 1716 struct sk_buff *skb;
89420424 1717
bb349bb4 1718 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1719 if (unlikely(!skb)) {
ac124ff9 1720 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1721 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1722 return;
1723 }
1724
10ef9ab4 1725 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1726
6332c8d3 1727 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1728 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1729 else
1730 skb_checksum_none_assert(skb);
6b7c5b94 1731
6332c8d3 1732 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1733 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1734 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1735 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1736
b6c0e89d 1737 skb->csum_level = rxcp->tunneled;
6384a4d0 1738 skb_mark_napi_id(skb, napi);
6b7c5b94 1739
343e43c0 1740 if (rxcp->vlanf)
86a9bad3 1741 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1742
1743 netif_receive_skb(skb);
6b7c5b94
SP
1744}
1745
5be93b9a 1746/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1747static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1748 struct napi_struct *napi,
1749 struct be_rx_compl_info *rxcp)
6b7c5b94 1750{
10ef9ab4 1751 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1752 struct be_rx_page_info *page_info;
5be93b9a 1753 struct sk_buff *skb = NULL;
2e588f84
SP
1754 u16 remaining, curr_frag_len;
1755 u16 i, j;
3968fa1e 1756
10ef9ab4 1757 skb = napi_get_frags(napi);
5be93b9a 1758 if (!skb) {
10ef9ab4 1759 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1760 return;
1761 }
1762
2e588f84
SP
1763 remaining = rxcp->pkt_size;
1764 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1765 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1766
1767 curr_frag_len = min(remaining, rx_frag_size);
1768
bd46cb6c
AK
1769 /* Coalesce all frags from the same physical page in one slot */
1770 if (i == 0 || page_info->page_offset == 0) {
1771 /* First frag or Fresh page */
1772 j++;
b061b39e 1773 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1774 skb_shinfo(skb)->frags[j].page_offset =
1775 page_info->page_offset;
9e903e08 1776 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1777 } else {
1778 put_page(page_info->page);
1779 }
9e903e08 1780 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1781 skb->truesize += rx_frag_size;
bd46cb6c 1782 remaining -= curr_frag_len;
6b7c5b94
SP
1783 memset(page_info, 0, sizeof(*page_info));
1784 }
bd46cb6c 1785 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1786
5be93b9a 1787 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1788 skb->len = rxcp->pkt_size;
1789 skb->data_len = rxcp->pkt_size;
5be93b9a 1790 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1791 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1792 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1793 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1794
b6c0e89d 1795 skb->csum_level = rxcp->tunneled;
6384a4d0 1796 skb_mark_napi_id(skb, napi);
5be93b9a 1797
343e43c0 1798 if (rxcp->vlanf)
86a9bad3 1799 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1800
10ef9ab4 1801 napi_gro_frags(napi);
2e588f84
SP
1802}
1803
10ef9ab4
SP
1804static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1805 struct be_rx_compl_info *rxcp)
2e588f84 1806{
c3c18bc1
SP
1807 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1808 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1809 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1810 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1811 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1812 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1813 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1814 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1815 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1816 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1817 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1818 if (rxcp->vlanf) {
c3c18bc1
SP
1819 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1820 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1821 }
c3c18bc1 1822 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1823 rxcp->tunneled =
c3c18bc1 1824 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1825}
1826
10ef9ab4
SP
1827static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1828 struct be_rx_compl_info *rxcp)
2e588f84 1829{
c3c18bc1
SP
1830 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1831 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1832 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1833 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1834 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1835 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1836 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1837 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1838 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1839 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1840 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1841 if (rxcp->vlanf) {
c3c18bc1
SP
1842 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1843 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1844 }
c3c18bc1
SP
1845 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1846 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1847}
1848
1849static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1850{
1851 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1852 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1853 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1854
2e588f84
SP
1855 /* For checking the valid bit, it is OK to use either definition, as the
1856 * valid bit is at the same position in both v0 and v1 Rx compl */
1857 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1858 return NULL;
6b7c5b94 1859
2e588f84
SP
1860 rmb();
1861 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1862
2e588f84 1863 if (adapter->be3_native)
10ef9ab4 1864 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1865 else
10ef9ab4 1866 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1867
e38b1706
SK
1868 if (rxcp->ip_frag)
1869 rxcp->l4_csum = 0;
1870
15d72184 1871 if (rxcp->vlanf) {
f93f160b
VV
1872 /* In QNQ modes, if qnq bit is not set, then the packet was
1873 * tagged only with the transparent outer vlan-tag and must
1874 * not be treated as a vlan packet by the host
1875 */
1876 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1877 rxcp->vlanf = 0;
6b7c5b94 1878
15d72184 1879 if (!lancer_chip(adapter))
3c709f8f 1880 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1881
939cf306 1882 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1883 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1884 rxcp->vlanf = 0;
1885 }
2e588f84
SP
1886
1887 /* As the compl has been parsed, reset it; we won't touch it again */
1888 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1889
3abcdeda 1890 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1891 return rxcp;
1892}
1893
1829b086 1894static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1895{
6b7c5b94 1896 u32 order = get_order(size);
1829b086 1897
6b7c5b94 1898 if (order > 0)
1829b086
ED
1899 gfp |= __GFP_COMP;
1900 return alloc_pages(gfp, order);
6b7c5b94
SP
1901}
1902
1903/*
1904 * Allocate a page, split it into fragments of size rx_frag_size and post as
1905 * receive buffers to BE
1906 */
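/* Worked example (assuming the default module parameters): with
 * rx_frag_size = 2048 on a 4K-page system, get_order(2048) == 0, so
 * big_page_size == PAGE_SIZE and each allocated page is carved into
 * two 2048-byte receive fragments.
 */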
c30d7266 1907static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1908{
3abcdeda 1909 struct be_adapter *adapter = rxo->adapter;
26d92f92 1910 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1911 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1912 struct page *pagep = NULL;
ba42fad0 1913 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1914 struct be_eth_rx_d *rxd;
1915 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1916 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1917
3abcdeda 1918 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1919 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1920 if (!pagep) {
1829b086 1921 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1922 if (unlikely(!pagep)) {
ac124ff9 1923 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1924 break;
1925 }
ba42fad0
IV
1926 page_dmaaddr = dma_map_page(dev, pagep, 0,
1927 adapter->big_page_size,
2b7bcebf 1928 DMA_FROM_DEVICE);
ba42fad0
IV
1929 if (dma_mapping_error(dev, page_dmaaddr)) {
1930 put_page(pagep);
1931 pagep = NULL;
d3de1540 1932 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1933 break;
1934 }
e50287be 1935 page_offset = 0;
6b7c5b94
SP
1936 } else {
1937 get_page(pagep);
e50287be 1938 page_offset += rx_frag_size;
6b7c5b94 1939 }
e50287be 1940 page_info->page_offset = page_offset;
6b7c5b94 1941 page_info->page = pagep;
6b7c5b94
SP
1942
1943 rxd = queue_head_node(rxq);
e50287be 1944 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1945 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1946 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1947
1948 /* Any space left in the current big page for another frag? */
1949 if ((page_offset + rx_frag_size + rx_frag_size) >
1950 adapter->big_page_size) {
1951 pagep = NULL;
e50287be
SP
1952 page_info->last_frag = true;
1953 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1954 } else {
1955 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1956 }
26d92f92
SP
1957
1958 prev_page_info = page_info;
1959 queue_head_inc(rxq);
10ef9ab4 1960 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1961 }
e50287be
SP
1962
1963 /* Mark the last frag of a page when we break out of the above loop
1964 * with no more slots available in the RXQ
1965 */
1966 if (pagep) {
1967 prev_page_info->last_frag = true;
1968 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1969 }
6b7c5b94
SP
1970
1971 if (posted) {
6b7c5b94 1972 atomic_add(posted, &rxq->used);
6384a4d0
SP
1973 if (rxo->rx_post_starved)
1974 rxo->rx_post_starved = false;
c30d7266
AK
1975 do {
1976 notify = min(256u, posted);
1977 be_rxq_notify(adapter, rxq->id, notify);
1978 posted -= notify;
1979 } while (posted);
ea1dae11
SP
1980 } else if (atomic_read(&rxq->used) == 0) {
1981 /* Let be_worker replenish when memory is available */
3abcdeda 1982 rxo->rx_post_starved = true;
6b7c5b94 1983 }
6b7c5b94
SP
1984}
1985
5fb379ee 1986static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1987{
6b7c5b94
SP
1988 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1989
1990 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1991 return NULL;
1992
f3eb62d2 1993 rmb();
6b7c5b94
SP
1994 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1995
1996 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1997
1998 queue_tail_inc(tx_cq);
1999 return txcp;
2000}
2001
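/* Descriptive note (added): walks the TXQ from its tail up to last_index,
 * unmapping the header wrb plus one wrb per fragment for each request and
 * freeing the completed skbs; the number of wrbs consumed is returned so
 * that the caller can decrement txq->used.
 */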
3c8def97 2002static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2003 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2004{
5f07b3c5 2005 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2006 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2007 u16 frag_index, num_wrbs = 0;
2008 struct sk_buff *skb = NULL;
2009 bool unmap_skb_hdr = false;
a73b796e 2010 struct be_eth_wrb *wrb;
6b7c5b94 2011
ec43b1a6 2012 do {
5f07b3c5
SP
2013 if (sent_skbs[txq->tail]) {
2014 /* Free skb from prev req */
2015 if (skb)
2016 dev_consume_skb_any(skb);
2017 skb = sent_skbs[txq->tail];
2018 sent_skbs[txq->tail] = NULL;
2019 queue_tail_inc(txq); /* skip hdr wrb */
2020 num_wrbs++;
2021 unmap_skb_hdr = true;
2022 }
a73b796e 2023 wrb = queue_tail_node(txq);
5f07b3c5 2024 frag_index = txq->tail;
2b7bcebf 2025 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2026 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2027 unmap_skb_hdr = false;
6b7c5b94 2028 queue_tail_inc(txq);
5f07b3c5
SP
2029 num_wrbs++;
2030 } while (frag_index != last_index);
2031 dev_consume_skb_any(skb);
6b7c5b94 2032
4d586b82 2033 return num_wrbs;
6b7c5b94
SP
2034}
2035
10ef9ab4
SP
2036/* Return the number of events in the event queue */
2037static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2038{
10ef9ab4
SP
2039 struct be_eq_entry *eqe;
2040 int num = 0;
859b1e4e 2041
10ef9ab4
SP
2042 do {
2043 eqe = queue_tail_node(&eqo->q);
2044 if (eqe->evt == 0)
2045 break;
859b1e4e 2046
10ef9ab4
SP
2047 rmb();
2048 eqe->evt = 0;
2049 num++;
2050 queue_tail_inc(&eqo->q);
2051 } while (true);
2052
2053 return num;
859b1e4e
SP
2054}
2055
10ef9ab4
SP
2056/* Leaves the EQ in disarmed state */
2057static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2058{
10ef9ab4 2059 int num = events_get(eqo);
859b1e4e 2060
10ef9ab4 2061 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2062}
2063
10ef9ab4 2064static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2065{
2066 struct be_rx_page_info *page_info;
3abcdeda
SP
2067 struct be_queue_info *rxq = &rxo->q;
2068 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2069 struct be_rx_compl_info *rxcp;
d23e946c
SP
2070 struct be_adapter *adapter = rxo->adapter;
2071 int flush_wait = 0;
6b7c5b94 2072
d23e946c
SP
2073 /* Consume pending rx completions.
2074 * Wait for the flush completion (identified by zero num_rcvd)
2075 * to arrive. Notify the CQ even when there are no more CQ entries,
2076 * so that HW flushes partially coalesced CQ entries.
2077 * In Lancer, there is no need to wait for flush compl.
2078 */
2079 for (;;) {
2080 rxcp = be_rx_compl_get(rxo);
ddf1169f 2081 if (!rxcp) {
d23e946c
SP
2082 if (lancer_chip(adapter))
2083 break;
2084
2085 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2086 dev_warn(&adapter->pdev->dev,
2087 "did not receive flush compl\n");
2088 break;
2089 }
2090 be_cq_notify(adapter, rx_cq->id, true, 0);
2091 mdelay(1);
2092 } else {
2093 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2094 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2095 if (rxcp->num_rcvd == 0)
2096 break;
2097 }
6b7c5b94
SP
2098 }
2099
d23e946c
SP
2100 /* After cleanup, leave the CQ in unarmed state */
2101 be_cq_notify(adapter, rx_cq->id, false, 0);
2102
2103 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2104 while (atomic_read(&rxq->used) > 0) {
2105 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2106 put_page(page_info->page);
2107 memset(page_info, 0, sizeof(*page_info));
2108 }
2109 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2110 rxq->tail = 0;
2111 rxq->head = 0;
6b7c5b94
SP
2112}
2113
0ae57bb3 2114static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2115{
5f07b3c5
SP
2116 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2117 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2118 struct be_tx_obj *txo;
2119 struct be_queue_info *txq;
a8e9179a 2120 struct be_eth_tx_compl *txcp;
0ae57bb3 2121 int i, pending_txqs;
a8e9179a 2122
1a3d0717 2123 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2124 do {
0ae57bb3
SP
2125 pending_txqs = adapter->num_tx_qs;
2126
2127 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2128 cmpl = 0;
2129 num_wrbs = 0;
0ae57bb3
SP
2130 txq = &txo->q;
2131 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2132 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2133 num_wrbs += be_tx_compl_process(adapter, txo,
2134 end_idx);
2135 cmpl++;
2136 }
2137 if (cmpl) {
2138 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2139 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2140 timeo = 0;
0ae57bb3 2141 }
5f07b3c5 2142 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2143 pending_txqs--;
a8e9179a
SP
2144 }
2145
1a3d0717 2146 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2147 break;
2148
2149 mdelay(1);
2150 } while (true);
2151
5f07b3c5 2152 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2153 for_all_tx_queues(adapter, txo, i) {
2154 txq = &txo->q;
0ae57bb3 2155
5f07b3c5
SP
2156 if (atomic_read(&txq->used)) {
2157 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2158 i, atomic_read(&txq->used));
2159 notified_idx = txq->tail;
0ae57bb3 2160 end_idx = txq->tail;
5f07b3c5
SP
2161 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2162 txq->len);
2163 /* Use the tx-compl process logic to handle requests
2164 * that were not sent to the HW.
2165 */
0ae57bb3
SP
2166 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2167 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2168 BUG_ON(atomic_read(&txq->used));
2169 txo->pend_wrb_cnt = 0;
2170 /* Since hw was never notified of these requests,
2171 * reset TXQ indices
2172 */
2173 txq->head = notified_idx;
2174 txq->tail = notified_idx;
0ae57bb3 2175 }
b03388d6 2176 }
6b7c5b94
SP
2177}
2178
10ef9ab4
SP
2179static void be_evt_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_eq_obj *eqo;
2182 int i;
2183
2184 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2185 if (eqo->q.created) {
2186 be_eq_clean(eqo);
10ef9ab4 2187 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2188 napi_hash_del(&eqo->napi);
68d7bdcb 2189 netif_napi_del(&eqo->napi);
19d59aa7 2190 }
10ef9ab4
SP
2191 be_queue_free(adapter, &eqo->q);
2192 }
2193}
2194
2195static int be_evt_queues_create(struct be_adapter *adapter)
2196{
2197 struct be_queue_info *eq;
2198 struct be_eq_obj *eqo;
2632bafd 2199 struct be_aic_obj *aic;
10ef9ab4
SP
2200 int i, rc;
2201
92bf14ab
SP
2202 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2203 adapter->cfg_num_qs);
10ef9ab4
SP
2204
2205 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2206 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2207 BE_NAPI_WEIGHT);
6384a4d0 2208 napi_hash_add(&eqo->napi);
2632bafd 2209 aic = &adapter->aic_obj[i];
10ef9ab4 2210 eqo->adapter = adapter;
10ef9ab4 2211 eqo->idx = i;
2632bafd
SP
2212 aic->max_eqd = BE_MAX_EQD;
2213 aic->enable = true;
10ef9ab4
SP
2214
2215 eq = &eqo->q;
2216 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2217 sizeof(struct be_eq_entry));
10ef9ab4
SP
2218 if (rc)
2219 return rc;
2220
f2f781a7 2221 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2222 if (rc)
2223 return rc;
2224 }
1cfafab9 2225 return 0;
10ef9ab4
SP
2226}
2227
5fb379ee
SP
2228static void be_mcc_queues_destroy(struct be_adapter *adapter)
2229{
2230 struct be_queue_info *q;
5fb379ee 2231
8788fdc2 2232 q = &adapter->mcc_obj.q;
5fb379ee 2233 if (q->created)
8788fdc2 2234 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2235 be_queue_free(adapter, q);
2236
8788fdc2 2237 q = &adapter->mcc_obj.cq;
5fb379ee 2238 if (q->created)
8788fdc2 2239 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2240 be_queue_free(adapter, q);
2241}
2242
2243/* Must be called only after TX qs are created as MCC shares TX EQ */
2244static int be_mcc_queues_create(struct be_adapter *adapter)
2245{
2246 struct be_queue_info *q, *cq;
5fb379ee 2247
8788fdc2 2248 cq = &adapter->mcc_obj.cq;
5fb379ee 2249 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2250 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2251 goto err;
2252
10ef9ab4
SP
2253 /* Use the default EQ for MCC completions */
2254 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2255 goto mcc_cq_free;
2256
8788fdc2 2257 q = &adapter->mcc_obj.q;
5fb379ee
SP
2258 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2259 goto mcc_cq_destroy;
2260
8788fdc2 2261 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2262 goto mcc_q_free;
2263
2264 return 0;
2265
2266mcc_q_free:
2267 be_queue_free(adapter, q);
2268mcc_cq_destroy:
8788fdc2 2269 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2270mcc_cq_free:
2271 be_queue_free(adapter, cq);
2272err:
2273 return -1;
2274}
2275
6b7c5b94
SP
2276static void be_tx_queues_destroy(struct be_adapter *adapter)
2277{
2278 struct be_queue_info *q;
3c8def97
SP
2279 struct be_tx_obj *txo;
2280 u8 i;
6b7c5b94 2281
3c8def97
SP
2282 for_all_tx_queues(adapter, txo, i) {
2283 q = &txo->q;
2284 if (q->created)
2285 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2286 be_queue_free(adapter, q);
6b7c5b94 2287
3c8def97
SP
2288 q = &txo->cq;
2289 if (q->created)
2290 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2291 be_queue_free(adapter, q);
2292 }
6b7c5b94
SP
2293}
2294
7707133c 2295static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2296{
10ef9ab4 2297 struct be_queue_info *cq, *eq;
3c8def97 2298 struct be_tx_obj *txo;
92bf14ab 2299 int status, i;
6b7c5b94 2300
92bf14ab 2301 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2302
10ef9ab4
SP
2303 for_all_tx_queues(adapter, txo, i) {
2304 cq = &txo->cq;
2305 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2306 sizeof(struct be_eth_tx_compl));
2307 if (status)
2308 return status;
3c8def97 2309
827da44c
JS
2310 u64_stats_init(&txo->stats.sync);
2311 u64_stats_init(&txo->stats.sync_compl);
2312
10ef9ab4
SP
2313 /* If num_evt_qs is less than num_tx_qs, then more than
2314 * one txq shares an eq
2315 */
2316 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2317 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2318 if (status)
2319 return status;
6b7c5b94 2320
10ef9ab4
SP
2321 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2322 sizeof(struct be_eth_wrb));
2323 if (status)
2324 return status;
6b7c5b94 2325
94d73aaa 2326 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2327 if (status)
2328 return status;
3c8def97 2329 }
6b7c5b94 2330
d379142b
SP
2331 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2332 adapter->num_tx_qs);
10ef9ab4 2333 return 0;
6b7c5b94
SP
2334}
2335
10ef9ab4 2336static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2337{
2338 struct be_queue_info *q;
3abcdeda
SP
2339 struct be_rx_obj *rxo;
2340 int i;
2341
2342 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2343 q = &rxo->cq;
2344 if (q->created)
2345 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2346 be_queue_free(adapter, q);
ac6a0c4a
SP
2347 }
2348}
2349
10ef9ab4 2350static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2351{
10ef9ab4 2352 struct be_queue_info *eq, *cq;
3abcdeda
SP
2353 struct be_rx_obj *rxo;
2354 int rc, i;
6b7c5b94 2355
92bf14ab
SP
2356 /* We can create as many RSS rings as there are EQs. */
2357 adapter->num_rx_qs = adapter->num_evt_qs;
2358
2359 /* We'll use RSS only if at least 2 RSS rings are supported.
2360 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2361 */
92bf14ab
SP
2362 if (adapter->num_rx_qs > 1)
2363 adapter->num_rx_qs++;
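	/* Example (assumed config): with 4 EQs this yields 4 RSS rings plus
	 * the default RXQ, i.e. num_rx_qs == 5.
	 */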
2364
6b7c5b94 2365 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2366 for_all_rx_queues(adapter, rxo, i) {
2367 rxo->adapter = adapter;
3abcdeda
SP
2368 cq = &rxo->cq;
2369 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2370 sizeof(struct be_eth_rx_compl));
3abcdeda 2371 if (rc)
10ef9ab4 2372 return rc;
3abcdeda 2373
827da44c 2374 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2375 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2376 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2377 if (rc)
10ef9ab4 2378 return rc;
3abcdeda 2379 }
6b7c5b94 2380
d379142b
SP
2381 dev_info(&adapter->pdev->dev,
2382 "created %d RSS queue(s) and 1 default RX queue\n",
2383 adapter->num_rx_qs - 1);
10ef9ab4 2384 return 0;
b628bde2
SP
2385}
2386
6b7c5b94
SP
2387static irqreturn_t be_intx(int irq, void *dev)
2388{
e49cc34f
SP
2389 struct be_eq_obj *eqo = dev;
2390 struct be_adapter *adapter = eqo->adapter;
2391 int num_evts = 0;
6b7c5b94 2392
d0b9cec3
SP
2393 /* IRQ is not expected when NAPI is scheduled as the EQ
2394 * will not be armed.
2395 * But, this can happen on Lancer INTx where it takes
2396 * a while to de-assert INTx or in BE2 where occasionally
2397 * an interrupt may be raised even when EQ is unarmed.
2398 * If NAPI is already scheduled, then counting & notifying
2399 * events will orphan them.
e49cc34f 2400 */
d0b9cec3 2401 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2402 num_evts = events_get(eqo);
d0b9cec3
SP
2403 __napi_schedule(&eqo->napi);
2404 if (num_evts)
2405 eqo->spurious_intr = 0;
2406 }
2407 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2408
d0b9cec3
SP
2409 /* Return IRQ_HANDLED only for the first spurious intr
2410 * after a valid intr to stop the kernel from branding
2411 * this irq as a bad one!
e49cc34f 2412 */
d0b9cec3
SP
2413 if (num_evts || eqo->spurious_intr++ == 0)
2414 return IRQ_HANDLED;
2415 else
2416 return IRQ_NONE;
6b7c5b94
SP
2417}
2418
10ef9ab4 2419static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2420{
10ef9ab4 2421 struct be_eq_obj *eqo = dev;
6b7c5b94 2422
0b545a62
SP
2423 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2424 napi_schedule(&eqo->napi);
6b7c5b94
SP
2425 return IRQ_HANDLED;
2426}
2427
2e588f84 2428static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2429{
e38b1706 2430 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2431}
2432
10ef9ab4 2433static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2434 int budget, int polling)
6b7c5b94 2435{
3abcdeda
SP
2436 struct be_adapter *adapter = rxo->adapter;
2437 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2438 struct be_rx_compl_info *rxcp;
6b7c5b94 2439 u32 work_done;
c30d7266 2440 u32 frags_consumed = 0;
6b7c5b94
SP
2441
2442 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2443 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2444 if (!rxcp)
2445 break;
2446
12004ae9
SP
2447 /* Is it a flush compl that has no data */
2448 if (unlikely(rxcp->num_rcvd == 0))
2449 goto loop_continue;
2450
2451 /* Discard compl with partial DMA Lancer B0 */
2452 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2453 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2454 goto loop_continue;
2455 }
2456
2457 /* On BE drop pkts that arrive due to imperfect filtering in
2458 * promiscuous mode on some SKUs
2459 */
2460 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2461 !lancer_chip(adapter))) {
10ef9ab4 2462 be_rx_compl_discard(rxo, rxcp);
12004ae9 2463 goto loop_continue;
64642811 2464 }
009dd872 2465
6384a4d0
SP
2466 /* Don't do gro when we're busy_polling */
2467 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2468 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2469 else
6384a4d0
SP
2470 be_rx_compl_process(rxo, napi, rxcp);
2471
12004ae9 2472loop_continue:
c30d7266 2473 frags_consumed += rxcp->num_rcvd;
2e588f84 2474 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2475 }
2476
10ef9ab4
SP
2477 if (work_done) {
2478 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2479
6384a4d0
SP
2480 /* When an rx-obj gets into post_starved state, just
2481 * let be_worker do the posting.
2482 */
2483 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2484 !rxo->rx_post_starved)
c30d7266
AK
2485 be_post_rx_frags(rxo, GFP_ATOMIC,
2486 max_t(u32, MAX_RX_POST,
2487 frags_consumed));
6b7c5b94 2488 }
10ef9ab4 2489
6b7c5b94
SP
2490 return work_done;
2491}
2492
512bb8a2
KA
2493static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2494{
2495 switch (status) {
2496 case BE_TX_COMP_HDR_PARSE_ERR:
2497 tx_stats(txo)->tx_hdr_parse_err++;
2498 break;
2499 case BE_TX_COMP_NDMA_ERR:
2500 tx_stats(txo)->tx_dma_err++;
2501 break;
2502 case BE_TX_COMP_ACL_ERR:
2503 tx_stats(txo)->tx_spoof_check_err++;
2504 break;
2505 }
2506}
2507
2508static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2509{
2510 switch (status) {
2511 case LANCER_TX_COMP_LSO_ERR:
2512 tx_stats(txo)->tx_tso_err++;
2513 break;
2514 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2515 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2516 tx_stats(txo)->tx_spoof_check_err++;
2517 break;
2518 case LANCER_TX_COMP_QINQ_ERR:
2519 tx_stats(txo)->tx_qinq_err++;
2520 break;
2521 case LANCER_TX_COMP_PARITY_ERR:
2522 tx_stats(txo)->tx_internal_parity_err++;
2523 break;
2524 case LANCER_TX_COMP_DMA_ERR:
2525 tx_stats(txo)->tx_dma_err++;
2526 break;
2527 }
2528}
2529
c8f64615
SP
2530static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2531 int idx)
6b7c5b94 2532{
6b7c5b94 2533 struct be_eth_tx_compl *txcp;
c8f64615 2534 int num_wrbs = 0, work_done = 0;
512bb8a2 2535 u32 compl_status;
c8f64615
SP
2536 u16 last_idx;
2537
2538 while ((txcp = be_tx_compl_get(&txo->cq))) {
2539 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2540 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2541 work_done++;
3c8def97 2542
512bb8a2
KA
2543 compl_status = GET_TX_COMPL_BITS(status, txcp);
2544 if (compl_status) {
2545 if (lancer_chip(adapter))
2546 lancer_update_tx_err(txo, compl_status);
2547 else
2548 be_update_tx_err(txo, compl_status);
2549 }
10ef9ab4 2550 }
6b7c5b94 2551
10ef9ab4
SP
2552 if (work_done) {
2553 be_cq_notify(adapter, txo->cq.id, true, work_done);
2554 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2555
10ef9ab4
SP
2556 /* As Tx wrbs have been freed up, wake up netdev queue
2557 * if it was stopped due to lack of tx wrbs. */
2558 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2559 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2560 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2561 }
10ef9ab4
SP
2562
2563 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2564 tx_stats(txo)->tx_compl += work_done;
2565 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2566 }
10ef9ab4 2567}
6b7c5b94 2568
f7062ee5
SP
2569#ifdef CONFIG_NET_RX_BUSY_POLL
2570static inline bool be_lock_napi(struct be_eq_obj *eqo)
2571{
2572 bool status = true;
2573
2574 spin_lock(&eqo->lock); /* BH is already disabled */
2575 if (eqo->state & BE_EQ_LOCKED) {
2576 WARN_ON(eqo->state & BE_EQ_NAPI);
2577 eqo->state |= BE_EQ_NAPI_YIELD;
2578 status = false;
2579 } else {
2580 eqo->state = BE_EQ_NAPI;
2581 }
2582 spin_unlock(&eqo->lock);
2583 return status;
2584}
2585
2586static inline void be_unlock_napi(struct be_eq_obj *eqo)
2587{
2588 spin_lock(&eqo->lock); /* BH is already disabled */
2589
2590 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2591 eqo->state = BE_EQ_IDLE;
2592
2593 spin_unlock(&eqo->lock);
2594}
2595
2596static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2597{
2598 bool status = true;
2599
2600 spin_lock_bh(&eqo->lock);
2601 if (eqo->state & BE_EQ_LOCKED) {
2602 eqo->state |= BE_EQ_POLL_YIELD;
2603 status = false;
2604 } else {
2605 eqo->state |= BE_EQ_POLL;
2606 }
2607 spin_unlock_bh(&eqo->lock);
2608 return status;
2609}
2610
2611static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2612{
2613 spin_lock_bh(&eqo->lock);
2614
2615 WARN_ON(eqo->state & (BE_EQ_NAPI));
2616 eqo->state = BE_EQ_IDLE;
2617
2618 spin_unlock_bh(&eqo->lock);
2619}
2620
2621static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2622{
2623 spin_lock_init(&eqo->lock);
2624 eqo->state = BE_EQ_IDLE;
2625}
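/* Summary of the eqo->state transitions implemented above (descriptive):
 *   BE_EQ_IDLE -> BE_EQ_NAPI via be_lock_napi()
 *   BE_EQ_IDLE -> BE_EQ_POLL via be_lock_busy_poll()
 * A *_YIELD flag is recorded when the other path already holds the state,
 * and either unlock returns the EQ to BE_EQ_IDLE.
 */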
2626
2627static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2628{
2629 local_bh_disable();
2630
2631 /* It's enough to just acquire napi lock on the eqo to stop
2632 * be_busy_poll() from processing any queues.
2633 */
2634 while (!be_lock_napi(eqo))
2635 mdelay(1);
2636
2637 local_bh_enable();
2638}
2639
2640#else /* CONFIG_NET_RX_BUSY_POLL */
2641
2642static inline bool be_lock_napi(struct be_eq_obj *eqo)
2643{
2644 return true;
2645}
2646
2647static inline void be_unlock_napi(struct be_eq_obj *eqo)
2648{
2649}
2650
2651static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2652{
2653 return false;
2654}
2655
2656static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2657{
2658}
2659
2660static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2661{
2662}
2663
2664static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2665{
2666}
2667#endif /* CONFIG_NET_RX_BUSY_POLL */
2668
68d7bdcb 2669int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2670{
2671 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2672 struct be_adapter *adapter = eqo->adapter;
0b545a62 2673 int max_work = 0, work, i, num_evts;
6384a4d0 2674 struct be_rx_obj *rxo;
a4906ea0 2675 struct be_tx_obj *txo;
f31e50a8 2676
0b545a62
SP
2677 num_evts = events_get(eqo);
2678
a4906ea0
SP
2679 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2680 be_process_tx(adapter, txo, i);
f31e50a8 2681
6384a4d0
SP
2682 if (be_lock_napi(eqo)) {
2683 /* This loop will iterate twice for EQ0 in which
2684 * completions of the last RXQ (the default one) are also processed.
2685 * For other EQs the loop iterates only once.
2686 */
2687 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2688 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2689 max_work = max(work, max_work);
2690 }
2691 be_unlock_napi(eqo);
2692 } else {
2693 max_work = budget;
10ef9ab4 2694 }
6b7c5b94 2695
10ef9ab4
SP
2696 if (is_mcc_eqo(eqo))
2697 be_process_mcc(adapter);
93c86700 2698
10ef9ab4
SP
2699 if (max_work < budget) {
2700 napi_complete(napi);
0b545a62 2701 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2702 } else {
2703 /* As we'll continue in polling mode, count and clear events */
0b545a62 2704 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2705 }
10ef9ab4 2706 return max_work;
6b7c5b94
SP
2707}
2708
6384a4d0
SP
2709#ifdef CONFIG_NET_RX_BUSY_POLL
2710static int be_busy_poll(struct napi_struct *napi)
2711{
2712 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2713 struct be_adapter *adapter = eqo->adapter;
2714 struct be_rx_obj *rxo;
2715 int i, work = 0;
2716
2717 if (!be_lock_busy_poll(eqo))
2718 return LL_FLUSH_BUSY;
2719
2720 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2721 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2722 if (work)
2723 break;
2724 }
2725
2726 be_unlock_busy_poll(eqo);
2727 return work;
2728}
2729#endif
2730
f67ef7ba 2731void be_detect_error(struct be_adapter *adapter)
7c185276 2732{
e1cfb67a
PR
2733 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2734 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2735 u32 i;
eb0eecc1
SK
2736 bool error_detected = false;
2737 struct device *dev = &adapter->pdev->dev;
2738 struct net_device *netdev = adapter->netdev;
7c185276 2739
d23e946c 2740 if (be_hw_error(adapter))
72f02485
SP
2741 return;
2742
e1cfb67a
PR
2743 if (lancer_chip(adapter)) {
2744 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2745 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2746 sliport_err1 = ioread32(adapter->db +
748b539a 2747 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2748 sliport_err2 = ioread32(adapter->db +
748b539a 2749 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2750 adapter->hw_error = true;
2751 /* Do not log error messages if it's a FW reset */
2752 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2753 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2754 dev_info(dev, "Firmware update in progress\n");
2755 } else {
2756 error_detected = true;
2757 dev_err(dev, "Error detected in the card\n");
2758 dev_err(dev, "ERR: sliport status 0x%x\n",
2759 sliport_status);
2760 dev_err(dev, "ERR: sliport error1 0x%x\n",
2761 sliport_err1);
2762 dev_err(dev, "ERR: sliport error2 0x%x\n",
2763 sliport_err2);
2764 }
e1cfb67a
PR
2765 }
2766 } else {
2767 pci_read_config_dword(adapter->pdev,
748b539a 2768 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2769 pci_read_config_dword(adapter->pdev,
748b539a 2770 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2771 pci_read_config_dword(adapter->pdev,
748b539a 2772 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2773 pci_read_config_dword(adapter->pdev,
748b539a 2774 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2775
f67ef7ba
PR
2776 ue_lo = (ue_lo & ~ue_lo_mask);
2777 ue_hi = (ue_hi & ~ue_hi_mask);
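		/* Descriptive note (added): each bit still set after masking
		 * identifies a failed hardware block, decoded below via
		 * ue_status_low_desc/ue_status_hi_desc.
		 */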
7c185276 2778
eb0eecc1
SK
2779 /* On certain platforms BE hardware can indicate spurious UEs.
2780 * In case of a real UE, the HW is allowed to stop working on its own.
2781 * Hence hw_error is not set merely on UE detection.
2782 */
f67ef7ba 2783
eb0eecc1
SK
2784 if (ue_lo || ue_hi) {
2785 error_detected = true;
2786 dev_err(dev,
2787 "Unrecoverable Error detected in the adapter");
2788 dev_err(dev, "Please reboot server to recover");
2789 if (skyhawk_chip(adapter))
2790 adapter->hw_error = true;
2791 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2792 if (ue_lo & 1)
2793 dev_err(dev, "UE: %s bit set\n",
2794 ue_status_low_desc[i]);
2795 }
2796 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2797 if (ue_hi & 1)
2798 dev_err(dev, "UE: %s bit set\n",
2799 ue_status_hi_desc[i]);
2800 }
7c185276
AK
2801 }
2802 }
eb0eecc1
SK
2803 if (error_detected)
2804 netif_carrier_off(netdev);
7c185276
AK
2805}
2806
8d56ff11
SP
2807static void be_msix_disable(struct be_adapter *adapter)
2808{
ac6a0c4a 2809 if (msix_enabled(adapter)) {
8d56ff11 2810 pci_disable_msix(adapter->pdev);
ac6a0c4a 2811 adapter->num_msix_vec = 0;
68d7bdcb 2812 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2813 }
2814}
2815
c2bba3df 2816static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2817{
7dc4c064 2818 int i, num_vec;
d379142b 2819 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2820
92bf14ab
SP
2821 /* If RoCE is supported, program the max number of NIC vectors that
2822 * may be configured via set-channels, along with vectors needed for
2823 * RoCE. Else, just program the number we'll use initially.
2824 */
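	/* Illustration (assumed values): with be_max_eqs() == 8 on a 16-CPU
	 * host, num_vec = min(16, 32) = 16; on success the vectors are then
	 * split evenly, half for RoCE and half for the NIC.
	 */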
2825 if (be_roce_supported(adapter))
2826 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2827 2 * num_online_cpus());
2828 else
2829 num_vec = adapter->cfg_num_qs;
3abcdeda 2830
ac6a0c4a 2831 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2832 adapter->msix_entries[i].entry = i;
2833
7dc4c064
AG
2834 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2835 MIN_MSIX_VECTORS, num_vec);
2836 if (num_vec < 0)
2837 goto fail;
92bf14ab 2838
92bf14ab
SP
2839 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2840 adapter->num_msix_roce_vec = num_vec / 2;
2841 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2842 adapter->num_msix_roce_vec);
2843 }
2844
2845 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2846
2847 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2848 adapter->num_msix_vec);
c2bba3df 2849 return 0;
7dc4c064
AG
2850
2851fail:
2852 dev_warn(dev, "MSIx enable failed\n");
2853
2854 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2855 if (!be_physfn(adapter))
2856 return num_vec;
2857 return 0;
6b7c5b94
SP
2858}
2859
fe6d2a38 2860static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2861 struct be_eq_obj *eqo)
b628bde2 2862{
f2f781a7 2863 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2864}
6b7c5b94 2865
b628bde2
SP
2866static int be_msix_register(struct be_adapter *adapter)
2867{
10ef9ab4
SP
2868 struct net_device *netdev = adapter->netdev;
2869 struct be_eq_obj *eqo;
2870 int status, i, vec;
6b7c5b94 2871
10ef9ab4
SP
2872 for_all_evt_queues(adapter, eqo, i) {
2873 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2874 vec = be_msix_vec_get(adapter, eqo);
2875 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2876 if (status)
2877 goto err_msix;
2878 }
b628bde2 2879
6b7c5b94 2880 return 0;
3abcdeda 2881err_msix:
10ef9ab4
SP
2882 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2883 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2884 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2885 status);
ac6a0c4a 2886 be_msix_disable(adapter);
6b7c5b94
SP
2887 return status;
2888}
2889
2890static int be_irq_register(struct be_adapter *adapter)
2891{
2892 struct net_device *netdev = adapter->netdev;
2893 int status;
2894
ac6a0c4a 2895 if (msix_enabled(adapter)) {
6b7c5b94
SP
2896 status = be_msix_register(adapter);
2897 if (status == 0)
2898 goto done;
ba343c77
SB
2899 /* INTx is not supported for VF */
2900 if (!be_physfn(adapter))
2901 return status;
6b7c5b94
SP
2902 }
2903
e49cc34f 2904 /* INTx: only the first EQ is used */
6b7c5b94
SP
2905 netdev->irq = adapter->pdev->irq;
2906 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2907 &adapter->eq_obj[0]);
6b7c5b94
SP
2908 if (status) {
2909 dev_err(&adapter->pdev->dev,
2910 "INTx request IRQ failed - err %d\n", status);
2911 return status;
2912 }
2913done:
2914 adapter->isr_registered = true;
2915 return 0;
2916}
2917
2918static void be_irq_unregister(struct be_adapter *adapter)
2919{
2920 struct net_device *netdev = adapter->netdev;
10ef9ab4 2921 struct be_eq_obj *eqo;
3abcdeda 2922 int i;
6b7c5b94
SP
2923
2924 if (!adapter->isr_registered)
2925 return;
2926
2927 /* INTx */
ac6a0c4a 2928 if (!msix_enabled(adapter)) {
e49cc34f 2929 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2930 goto done;
2931 }
2932
2933 /* MSIx */
10ef9ab4
SP
2934 for_all_evt_queues(adapter, eqo, i)
2935 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2936
6b7c5b94
SP
2937done:
2938 adapter->isr_registered = false;
6b7c5b94
SP
2939}
2940
10ef9ab4 2941static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2942{
2943 struct be_queue_info *q;
2944 struct be_rx_obj *rxo;
2945 int i;
2946
2947 for_all_rx_queues(adapter, rxo, i) {
2948 q = &rxo->q;
2949 if (q->created) {
2950 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2951 be_rx_cq_clean(rxo);
482c9e79 2952 }
10ef9ab4 2953 be_queue_free(adapter, q);
482c9e79
SP
2954 }
2955}
2956
889cd4b2
SP
2957static int be_close(struct net_device *netdev)
2958{
2959 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2960 struct be_eq_obj *eqo;
2961 int i;
889cd4b2 2962
e1ad8e33
KA
2963 /* This protection is needed as be_close() may be called even when the
2964 * adapter is in a cleared state (after an EEH permanent failure)
2965 */
2966 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2967 return 0;
2968
045508a8
PP
2969 be_roce_dev_close(adapter);
2970
dff345c5
IV
2971 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2972 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2973 napi_disable(&eqo->napi);
6384a4d0
SP
2974 be_disable_busy_poll(eqo);
2975 }
71237b6f 2976 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2977 }
a323d9bf
SP
2978
2979 be_async_mcc_disable(adapter);
2980
2981 /* Wait for all pending tx completions to arrive so that
2982 * all tx skbs are freed.
2983 */
fba87559 2984 netif_tx_disable(netdev);
6e1f9975 2985 be_tx_compl_clean(adapter);
a323d9bf
SP
2986
2987 be_rx_qs_destroy(adapter);
f66b7cfd 2988 be_clear_uc_list(adapter);
d11a347d 2989
a323d9bf 2990 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2991 if (msix_enabled(adapter))
2992 synchronize_irq(be_msix_vec_get(adapter, eqo));
2993 else
2994 synchronize_irq(netdev->irq);
2995 be_eq_clean(eqo);
63fcb27f
PR
2996 }
2997
889cd4b2
SP
2998 be_irq_unregister(adapter);
2999
482c9e79
SP
3000 return 0;
3001}
3002
10ef9ab4 3003static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3004{
1dcf7b1c
ED
3005 struct rss_info *rss = &adapter->rss_info;
3006 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3007 struct be_rx_obj *rxo;
e9008ee9 3008 int rc, i, j;
482c9e79
SP
3009
3010 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3011 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3012 sizeof(struct be_eth_rx_d));
3013 if (rc)
3014 return rc;
3015 }
3016
3017 /* The FW would like the default RXQ to be created first */
3018 rxo = default_rxo(adapter);
3019 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3020 adapter->if_handle, false, &rxo->rss_id);
3021 if (rc)
3022 return rc;
3023
3024 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3025 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3026 rx_frag_size, adapter->if_handle,
3027 true, &rxo->rss_id);
482c9e79
SP
3028 if (rc)
3029 return rc;
3030 }
3031
3032 if (be_multi_rxq(adapter)) {
e2557877
VD
3033 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3034 j += adapter->num_rx_qs - 1) {
e9008ee9 3035 for_all_rss_queues(adapter, rxo, i) {
e2557877 3036 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3037 break;
e2557877
VD
3038 rss->rsstable[j + i] = rxo->rss_id;
3039 rss->rss_queue[j + i] = i;
e9008ee9
PR
3040 }
3041 }
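		/* Example (assumed config): with 4 RSS rings the 128-entry
		 * indirection table is filled round-robin with their rss_ids
		 * (ring0, ring1, ring2, ring3, ring0, ...), spreading hashed
		 * flows evenly across the rings.
		 */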
e2557877
VD
3042 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3043 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3044
3045 if (!BEx_chip(adapter))
e2557877
VD
3046 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3047 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3048 } else {
3049 /* Disable RSS, if only default RX Q is created */
e2557877 3050 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3051 }
594ad54a 3052
1dcf7b1c 3053 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3054 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3055 128, rss_key);
da1388d6 3056 if (rc) {
e2557877 3057 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3058 return rc;
482c9e79
SP
3059 }
3060
1dcf7b1c 3061 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3062
482c9e79 3063 /* First time posting */
10ef9ab4 3064 for_all_rx_queues(adapter, rxo, i)
c30d7266 3065 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3066 return 0;
3067}
3068
6b7c5b94
SP
3069static int be_open(struct net_device *netdev)
3070{
3071 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3072 struct be_eq_obj *eqo;
3abcdeda 3073 struct be_rx_obj *rxo;
10ef9ab4 3074 struct be_tx_obj *txo;
b236916a 3075 u8 link_status;
3abcdeda 3076 int status, i;
5fb379ee 3077
10ef9ab4 3078 status = be_rx_qs_create(adapter);
482c9e79
SP
3079 if (status)
3080 goto err;
3081
c2bba3df
SK
3082 status = be_irq_register(adapter);
3083 if (status)
3084 goto err;
5fb379ee 3085
10ef9ab4 3086 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3087 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3088
10ef9ab4
SP
3089 for_all_tx_queues(adapter, txo, i)
3090 be_cq_notify(adapter, txo->cq.id, true, 0);
3091
7a1e9b20
SP
3092 be_async_mcc_enable(adapter);
3093
10ef9ab4
SP
3094 for_all_evt_queues(adapter, eqo, i) {
3095 napi_enable(&eqo->napi);
6384a4d0 3096 be_enable_busy_poll(eqo);
4cad9f3b 3097 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3098 }
04d3d624 3099 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3100
323ff71e 3101 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3102 if (!status)
3103 be_link_status_update(adapter, link_status);
3104
fba87559 3105 netif_tx_start_all_queues(netdev);
045508a8 3106 be_roce_dev_open(adapter);
c9c47142 3107
c5abe7c0 3108#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3109 if (skyhawk_chip(adapter))
3110 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3111#endif
3112
889cd4b2
SP
3113 return 0;
3114err:
3115 be_close(adapter->netdev);
3116 return -EIO;
5fb379ee
SP
3117}
3118
71d8d1b5
AK
3119static int be_setup_wol(struct be_adapter *adapter, bool enable)
3120{
3121 struct be_dma_mem cmd;
3122 int status = 0;
3123 u8 mac[ETH_ALEN];
3124
3125 memset(mac, 0, ETH_ALEN);
3126
3127 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3128 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3129 GFP_KERNEL);
ddf1169f 3130 if (!cmd.va)
6b568689 3131 return -ENOMEM;
71d8d1b5
AK
3132
3133 if (enable) {
3134 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3135 PCICFG_PM_CONTROL_OFFSET,
3136 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3137 if (status) {
3138 dev_err(&adapter->pdev->dev,
2381a55c 3139 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3140 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3141 cmd.dma);
71d8d1b5
AK
3142 return status;
3143 }
3144 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3145 adapter->netdev->dev_addr,
3146 &cmd);
71d8d1b5
AK
3147 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3148 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3149 } else {
3150 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3151 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3152 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3153 }
3154
2b7bcebf 3155 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3156 return status;
3157}
3158
f7062ee5
SP
3159static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3160{
3161 u32 addr;
3162
3163 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3164
3165 mac[5] = (u8)(addr & 0xFF);
3166 mac[4] = (u8)((addr >> 8) & 0xFF);
3167 mac[3] = (u8)((addr >> 16) & 0xFF);
3168 /* Use the OUI from the current MAC address */
3169 memcpy(mac, adapter->netdev->dev_addr, 3);
3170}
3171
6d87f5c3
AK
3172/*
3173 * Generate a seed MAC address from the PF MAC Address using jhash.
3174 * MAC addresses for VFs are assigned incrementally starting from the seed.
3175 * These addresses are programmed in the ASIC by the PF and the VF driver
3176 * queries for the MAC address during its probe.
3177 */
4c876616 3178static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3179{
f9449ab7 3180 u32 vf;
3abcdeda 3181 int status = 0;
6d87f5c3 3182 u8 mac[ETH_ALEN];
11ac75ed 3183 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3184
3185 be_vf_eth_addr_generate(adapter, mac);
3186
11ac75ed 3187 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3188 if (BEx_chip(adapter))
590c391d 3189 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3190 vf_cfg->if_handle,
3191 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3192 else
3193 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3194 vf + 1);
590c391d 3195
6d87f5c3
AK
3196 if (status)
3197 dev_err(&adapter->pdev->dev,
748b539a
SP
3198 "Mac address assignment failed for VF %d\n",
3199 vf);
6d87f5c3 3200 else
11ac75ed 3201 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3202
3203 mac[5] += 1;
3204 }
3205 return status;
3206}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	if (adapter->pmac_id) {
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[0], 0);
		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;
	int status;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS;

	en_flags &= cap_flags;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  if_handle, vf);

	return status;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
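
A worked example of the VLAN-budget arithmetic above, assuming BE_NUM_VLANS_SUPPORTED is 64 (its value in be.h; treat that as an assumption of this sketch):

/* Assuming BE_NUM_VLANS_SUPPORTED == 64 (see be.h):
 *   QnQ multi-channel:      64 / 8     = 8 VLAN filter entries
 *   non-QnQ multi-channel:  64 / 4 - 1 = 15 (one entry consumed by the pvid)
 *   single-channel:         64         (the full filter table)
 */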

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
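
The num_vfs module parameter is clamped, not rejected, when it exceeds what the pool supports. A standalone rendering of that clamping (values illustrative):

#include <stdio.h>

static unsigned short clamp_num_vfs(unsigned short requested,
				    unsigned short pool_max)
{
	/* mirrors: adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs()) */
	return requested < pool_max ? requested : pool_max;
}

int main(void)
{
	printf("%u\n", clamp_num_vfs(32, 16));	/* prints 16 */
	return 0;
}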

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
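
A standalone userspace rendering of the parser above; the version strings are illustrative:

#include <stdio.h>

/* same parsing as fw_major_num(): read the integer before the first dot */
static int fw_major_num(const char *fw_ver)
{
	int fw_major = 0;

	if (sscanf(fw_ver, "%d.", &fw_major) != 1)
		return 0;
	return fw_major;
}

int main(void)
{
	printf("%d\n", fw_major_num("4.6.62.0"));	/* 4  */
	printf("%d\n", fw_major_num("10.2.230.18"));	/* 10 */
	printf("%d\n", fw_major_num("garbage"));	/* 0  */
	return 0;
}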

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
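
The directory lookup above scans the image in 32-byte strides for the full 32-byte cookie; since the first cookie string is only 11 characters, its NUL padding participates in the memcmp(). A freestanding sketch of the same scan (names local to the sketch):

#include <string.h>

/* the 2x16-byte cookie, exactly as the driver declares it */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

/* scan a firmware image for the section-directory cookie, 32 bytes at a time */
static long find_fsec_offset(const unsigned char *img, unsigned long len,
			     unsigned long hdr_size)
{
	unsigned long off;

	for (off = hdr_size; off + sizeof(flash_cookie) <= len; off += 32)
		if (!memcmp(img + off, flash_cookie, sizeof(flash_cookie)))
			return (long)off;
	return -1;	/* no directory found: image is likely corrupted */
}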

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
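
A worked example of the offset math above (the numbers are illustrative):

/* An image of img_size 0x10000 at img_offset 0x20000, behind hdr_size
 * 0x60 bytes of file headers, keeps its CRC in its last 4 bytes:
 *
 *   crc_offset = 0x60 + 0x20000 + 0x10000 - 4 = 0x3005C
 *
 * and the flash-side CRC is requested over img_size - 4 = 0xFFFC bytes,
 * i.e. everything except the stored CRC itself.
 */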

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
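
Every 32 KB chunk except the last is sent with a SAVE opcode (buffered by firmware); only the final chunk carries the FLASH opcode that commits the write. A standalone sketch of that chunk walk, with stand-in opcode names:

#include <stdio.h>

static void walk_chunks(unsigned int img_size)
{
	unsigned int total = img_size, off = 0, n;

	while (total) {
		n = total < 32 * 1024 ? total : 32 * 1024;
		total -= n;
		/* intermediate chunks are buffered (SAVE); the last one
		 * triggers the actual flash write (FLASH)
		 */
		printf("offset %6u len %5u op %s\n", off, n,
		       total ? "SAVE" : "FLASH");
		off += n;
	}
}

int main(void)
{
	walk_chunks(70 * 1024);	/* 32K SAVE, 32K SAVE, 6K FLASH */
	return 0;
}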

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}

#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
							      SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
							      BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}

/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	int ufi_type = be_get_ufi_type(adapter, fhdr);

	switch (ufi_type) {
	case SH_P2_UFI:
		return skyhawk_chip(adapter);
	case SH_UFI:
		return (skyhawk_chip(adapter) &&
			adapter->asic_rev < ASIC_REV_P2);
	case BE3R_UFI:
		return BE3_chip(adapter);
	case BE3_UFI:
		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
	case BE2_UFI:
		return BE2_chip(adapter);
	default:
		return false;
	}
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
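
be_load_fw() is reached from the driver's ethtool flash-device hook, which userspace triggers with "ethtool -f <dev> <file>". A hedged userspace sketch of the same ETHTOOL_FLASHDEV ioctl; "eth0" and the firmware file name are illustrative, and the file must be visible to request_firmware() (typically under /lib/firmware):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int flash_be2net(const char *ifname, const char *fw_name)
{
	struct ethtool_flash efl = { .cmd = ETHTOOL_FLASHDEV };
	struct ifreq ifr = {0};
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	/* file name is relative to the firmware search path */
	strncpy(efl.data, fw_name, sizeof(efl.data) - 1);
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&efl;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* ends up in be_load_fw() */
	close(fd);
	return ret;
}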

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

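The VxLAN sanity check in be_features_check() insists that exactly one UDP header and one VxLAN header sit between the outer transport header and the inner Ethernet header. A freestanding rendering of that arithmetic, using local stand-in structs that mirror the kernel's 8-byte udphdr and 8-byte vxlanhdr:

#include <assert.h>

struct udphdr_s { unsigned short src, dst, len, check; };	/* 8 bytes */
struct vxlanhdr_s { unsigned int flags, vni_reserved; };	/* 8 bytes */

int main(void)
{
	/* inner MAC header must start 16 bytes past the outer UDP header */
	assert(sizeof(struct udphdr_s) + sizeof(struct vxlanhdr_s) == 16);
	return 0;
}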

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
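
The mailbox is over-allocated by 16 bytes so that a 16-byte-aligned window can be carved out of it with PTR_ALIGN(). A minimal userspace rendering of the same trick, with a local stand-in for the kernel's PTR_ALIGN macro:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* local stand-in for the kernel's align-up PTR_ALIGN() */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

int main(void)
{
	size_t mbox_size = 256;			/* illustrative */
	void *raw = malloc(mbox_size + 16);	/* over-allocate by 16 */
	void *aligned = PTR_ALIGN(raw, 16);	/* next 16B boundary in raw */

	/* aligned..aligned+mbox_size always fits inside the raw buffer */
	printf("raw=%p aligned=%p\n", raw, aligned);
	free(raw);
	return 0;
}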

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
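
The power-of-2 constraint on be_get_temp_freq exists because the driver's MODULO macro (in be.h, not shown here) enforces it at runtime; only for power-of-two divisors does the remainder reduce to a cheap bit mask. A standalone illustration:

#include <assert.h>
#include <stdio.h>

/* for power-of-two n, x % n == x & (n - 1); with be_get_temp_freq = 64
 * the worker's temperature query fires every 64th tick
 */
int main(void)
{
	unsigned int x, n = 64;

	for (x = 0; x < 300; x++) {
		assert((x % n) == (x & (n - 1)));
		if ((x & (n - 1)) == 0)
			printf("tick %u: query die temperature\n", x);
	}
	return 0;
}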
4991
f67ef7ba 4992static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4993{
01e5b2c4 4994 struct device *dev = &adapter->pdev->dev;
d8110f62 4995 int status;
d8110f62 4996
f67ef7ba
PR
4997 status = lancer_test_and_set_rdy_state(adapter);
4998 if (status)
4999 goto err;
d8110f62 5000
f67ef7ba
PR
5001 if (netif_running(adapter->netdev))
5002 be_close(adapter->netdev);
d8110f62 5003
f67ef7ba
PR
5004 be_clear(adapter);
5005
01e5b2c4 5006 be_clear_all_error(adapter);
f67ef7ba
PR
5007
5008 status = be_setup(adapter);
5009 if (status)
5010 goto err;
d8110f62 5011
f67ef7ba
PR
5012 if (netif_running(adapter->netdev)) {
5013 status = be_open(adapter->netdev);
d8110f62
PR
5014 if (status)
5015 goto err;
f67ef7ba 5016 }
d8110f62 5017
4bebb56a 5018 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
5019 return 0;
5020err:
01e5b2c4
SK
5021 if (status == -EAGAIN)
5022 dev_err(dev, "Waiting for resource provisioning\n");
5023 else
4bebb56a 5024 dev_err(dev, "Adapter recovery failed\n");
d8110f62 5025
f67ef7ba
PR
5026 return status;
5027}
5028
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

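/* Periodic (1 second) housekeeping: reap MCC completions while the
 * interface is down, refresh HW stats and die temperature, replenish
 * RX queues starved by allocation failures and update EQ delays.
 */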
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return !pci_num_vf(adapter->pdev);
}

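/* Name of the multi-channel mode the function is provisioned in, or an
 * empty string when multi-channel is not enabled.
 */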
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

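/* Map the function's PCI device-id to the controller family name
 * (BE2/BE3, Lancer or Skyhawk variants) used in log messages.
 */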
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

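/* Ordering here matters: PCI/DMA setup, mailbox init (be_ctrl_init), the
 * FW ready/reset handshake, stats buffer, be_setup() and only then
 * register_netdev(); the error labels below unwind in exactly the
 * reverse order.
 */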
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

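/* Legacy PM suspend: optionally arm wake-on-LAN, quiesce the function
 * with be_close()/be_clear(), then save state and power the device down.
 */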
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

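/* Legacy PM resume: undo be_suspend() step by step: power up, wait for
 * FW, FLR the function, rebuild it via be_setup() and restart the error
 * recovery worker before re-attaching the netdev.
 */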
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

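/* EEH (PCI error) handling is split across three callbacks:
 * error_detected() quiesces the function, slot_reset() re-initializes the
 * device after the link reset, and resume() rebuilds and re-attaches it.
 */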
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

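/* Example module load (hypothetical parameter values):
 *	modprobe be2net rx_frag_size=4096
 * An out-of-range rx_frag_size is reset to the 2048 default below.
 */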
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);