be2net: assign CPU affinity hints to be2net IRQs
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

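/* The notify helpers below ring the device doorbells: each packs a queue id
 * and a count into a single 32-bit value and writes it to the BAR-mapped
 * doorbell space at adapter->db. Illustrative example (the exact shift/mask
 * values live in the register headers): for the RX queue doorbell, qid = 5
 * and posted = 64 produce
 * val = (5 & DB_RQ_RING_ID_MASK) | (64 << DB_RQ_NUM_POSTED_SHIFT).
 * The wmb() orders the queue-entry writes before the doorbell write.
 */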
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
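
/* Illustrative example of the wrap handling above: if *acc == 0x0001fff0, the
 * low 16 bits (0xfff0) mirror the last HW reading. A new reading of
 * val == 0x0005 is smaller, so the 16-bit HW counter must have wrapped:
 * newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005, which preserves the
 * running 32-bit total.
 */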

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

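	/* The u64_stats_fetch_begin_irq()/retry_irq() loops below take a
	 * seqcount-style snapshot: on 32-bit SMP hosts a 64-bit counter cannot
	 * be read atomically, so the read is retried whenever a writer bumped
	 * the per-ring sync sequence mid-read. On 64-bit builds the helpers
	 * compile down to plain reads.
	 */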
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
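
/* Example: an skb with a populated linear area and two page frags needs
 * 1 (header wrb) + 1 (linear data) + 2 (frags) = 4 WRBs.
 */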

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}
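
/* Illustrative example: vlan_tag 0x4005 carries priority 2 (0x4005 >> 13).
 * If bit 2 of adapter->vlan_prio_bmap is clear, the PCP bits are replaced by
 * adapter->recommended_prio (kept pre-shifted into the PCP position, as the
 * OR above implies), while the VID bits (0x005) are preserved.
 */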

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

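/* Ring-occupancy helpers: the TX queue is treated as full once it can no
 * longer absorb one more maximally fragmented skb (BE_MAX_TX_FRAG_COUNT WRBs
 * of headroom), and is only woken after draining to half capacity so it does
 * not thrash between stopped and started states.
 */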
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

ed616689
SC
1559static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1560 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1561{
1562 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1563 struct device *dev = &adapter->pdev->dev;
1564 int percent_rate, status = 0;
1565 u16 link_speed = 0;
1566 u8 link_status;
e1d18735 1567
11ac75ed 1568 if (!sriov_enabled(adapter))
e1d18735
AK
1569 return -EPERM;
1570
94f434c2 1571 if (vf >= adapter->num_vfs)
e1d18735
AK
1572 return -EINVAL;
1573
ed616689
SC
1574 if (min_tx_rate)
1575 return -EINVAL;
1576
0f77ba73
RN
1577 if (!max_tx_rate)
1578 goto config_qos;
1579
1580 status = be_cmd_link_status_query(adapter, &link_speed,
1581 &link_status, 0);
1582 if (status)
1583 goto err;
1584
1585 if (!link_status) {
1586 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1587 status = -ENETDOWN;
0f77ba73
RN
1588 goto err;
1589 }
1590
1591 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1592 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1593 link_speed);
1594 status = -EINVAL;
1595 goto err;
1596 }
1597
1598 /* On Skyhawk the QOS setting must be done only as a % value */
1599 percent_rate = link_speed / 100;
1600 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1601 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1602 percent_rate);
1603 status = -EINVAL;
1604 goto err;
94f434c2 1605 }
e1d18735 1606
0f77ba73
RN
1607config_qos:
1608 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1609 if (status)
0f77ba73
RN
1610 goto err;
1611
1612 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1613 return 0;
1614
1615err:
1616 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1617 max_tx_rate, vf);
abccf23e 1618 return be_cmd_status(status);
e1d18735 1619}
e2fb1afa 1620
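/* Editor's note (worked example, assuming a 10Gbps link): with
 * link_speed = 10000, percent_rate = 10000 / 100 = 100, so on Skyhawk
 * max_tx_rate must be a multiple of 100 Mbps in the range [100, 10000];
 * e.g. 2500 is accepted while 2550 fails the
 * (max_tx_rate % percent_rate) check above.
 */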
bdce2ad7
SR
1621static int be_set_vf_link_state(struct net_device *netdev, int vf,
1622 int link_state)
1623{
1624 struct be_adapter *adapter = netdev_priv(netdev);
1625 int status;
1626
1627 if (!sriov_enabled(adapter))
1628 return -EPERM;
1629
1630 if (vf >= adapter->num_vfs)
1631 return -EINVAL;
1632
1633 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1634 if (status) {
1635 dev_err(&adapter->pdev->dev,
1636 "Link state change on VF %d failed: %#x\n", vf, status);
1637 return be_cmd_status(status);
1638 }
bdce2ad7 1639
abccf23e
KA
1640 adapter->vf_cfg[vf].plink_tracking = link_state;
1641
1642 return 0;
bdce2ad7 1643}
e1d18735 1644
2632bafd
SP
1645static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1646 ulong now)
6b7c5b94 1647{
2632bafd
SP
1648 aic->rx_pkts_prev = rx_pkts;
1649 aic->tx_reqs_prev = tx_pkts;
1650 aic->jiffies = now;
1651}
ac124ff9 1652
2632bafd
SP
1653static void be_eqd_update(struct be_adapter *adapter)
1654{
1655 struct be_set_eqd set_eqd[MAX_EVT_QS];
1656 int eqd, i, num = 0, start;
1657 struct be_aic_obj *aic;
1658 struct be_eq_obj *eqo;
1659 struct be_rx_obj *rxo;
1660 struct be_tx_obj *txo;
1661 u64 rx_pkts, tx_pkts;
1662 ulong now;
1663 u32 pps, delta;
10ef9ab4 1664
2632bafd
SP
1665 for_all_evt_queues(adapter, eqo, i) {
1666 aic = &adapter->aic_obj[eqo->idx];
1667 if (!aic->enable) {
1668 if (aic->jiffies)
1669 aic->jiffies = 0;
1670 eqd = aic->et_eqd;
1671 goto modify_eqd;
1672 }
6b7c5b94 1673
2632bafd
SP
1674 rxo = &adapter->rx_obj[eqo->idx];
1675 do {
57a7744e 1676 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1677 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1678 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1679
2632bafd
SP
1680 txo = &adapter->tx_obj[eqo->idx];
1681 do {
57a7744e 1682 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1683 tx_pkts = txo->stats.tx_reqs;
57a7744e 1684 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1685
2632bafd
SP
1686 /* Skip if the counters wrapped around or this is the first calculation */
1687 now = jiffies;
1688 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1689 rx_pkts < aic->rx_pkts_prev ||
1690 tx_pkts < aic->tx_reqs_prev) {
1691 be_aic_update(aic, rx_pkts, tx_pkts, now);
1692 continue;
1693 }
1694
1695 delta = jiffies_to_msecs(now - aic->jiffies);
1696 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1697 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1698 eqd = (pps / 15000) << 2;
10ef9ab4 1699
2632bafd
SP
1700 if (eqd < 8)
1701 eqd = 0;
1702 eqd = min_t(u32, eqd, aic->max_eqd);
1703 eqd = max_t(u32, eqd, aic->min_eqd);
1704
1705 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1706modify_eqd:
2632bafd
SP
1707 if (eqd != aic->prev_eqd) {
1708 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1709 set_eqd[num].eq_id = eqo->q.id;
1710 aic->prev_eqd = eqd;
1711 num++;
1712 }
ac124ff9 1713 }
2632bafd
SP
1714
1715 if (num)
1716 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1717}
1718
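/* Editor's sketch of the adaptive coalescing math in be_eqd_update()
 * above; a standalone restatement for clarity, not driver code. The
 * helper name is hypothetical; the constants mirror the code: roughly
 * 4 EQD units per 15K pkts/s, no delay under light load, then a clamp
 * to the per-EQ limits. The programmed value is later scaled by 65/100
 * into the delay multiplier sent via be_cmd_modify_eqd().
 */
static inline u32 eqd_from_pps_sketch(u32 pps, u32 min_eqd, u32 max_eqd)
{
	u32 eqd = (pps / 15000) << 2;	/* ~4 EQD units per 15K pkts/s */

	if (eqd < 8)
		eqd = 0;		/* light traffic: no delay */
	eqd = min_t(u32, eqd, max_eqd);
	eqd = max_t(u32, eqd, min_eqd);
	return eqd;			/* delay_multiplier = (eqd * 65) / 100 */
}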
3abcdeda 1719static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1720 struct be_rx_compl_info *rxcp)
4097f663 1721{
ac124ff9 1722 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1723
ab1594e9 1724 u64_stats_update_begin(&stats->sync);
3abcdeda 1725 stats->rx_compl++;
2e588f84 1726 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1727 stats->rx_pkts++;
2e588f84 1728 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1729 stats->rx_mcast_pkts++;
2e588f84 1730 if (rxcp->err)
ac124ff9 1731 stats->rx_compl_err++;
ab1594e9 1732 u64_stats_update_end(&stats->sync);
4097f663
SP
1733}
1734
2e588f84 1735static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1736{
19fad86f 1737 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1738 * Also ignore ipcksm for ipv6 pkts
1739 */
2e588f84 1740 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1741 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1742}
1743
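/* Editor's note (example): a clean TCP/IPv4 completion has tcpf, l4_csum
 * and ip_csum set with err clear, so csum_passed() returns true. For
 * IPv6 the ip_csum result is ignored because IPv6 carries no IP header
 * checksum, and a non-TCP/UDP packet always fails the check.
 */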
0b0ef1d0 1744static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1745{
10ef9ab4 1746 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1747 struct be_rx_page_info *rx_page_info;
3abcdeda 1748 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1749 u16 frag_idx = rxq->tail;
6b7c5b94 1750
3abcdeda 1751 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1752 BUG_ON(!rx_page_info->page);
1753
e50287be 1754 if (rx_page_info->last_frag) {
2b7bcebf
IV
1755 dma_unmap_page(&adapter->pdev->dev,
1756 dma_unmap_addr(rx_page_info, bus),
1757 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1758 rx_page_info->last_frag = false;
1759 } else {
1760 dma_sync_single_for_cpu(&adapter->pdev->dev,
1761 dma_unmap_addr(rx_page_info, bus),
1762 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1763 }
6b7c5b94 1764
0b0ef1d0 1765 queue_tail_inc(rxq);
6b7c5b94
SP
1766 atomic_dec(&rxq->used);
1767 return rx_page_info;
1768}
1769
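/* Editor's note on the mapping scheme in get_rx_page_info() above: a big
 * page is DMA-mapped once when first posted; frags consumed from the
 * middle of the page are only synced for CPU access, and the whole
 * mapping is torn down only when the frag flagged last_frag is pulled
 * off the ring.
 */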
1770/* Throw away the data in the Rx completion */
10ef9ab4
SP
1771static void be_rx_compl_discard(struct be_rx_obj *rxo,
1772 struct be_rx_compl_info *rxcp)
6b7c5b94 1773{
6b7c5b94 1774 struct be_rx_page_info *page_info;
2e588f84 1775 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1776
e80d9da6 1777 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1778 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1779 put_page(page_info->page);
1780 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1781 }
1782}
1783
1784/*
1785 * skb_fill_rx_data forms a complete skb for an Ethernet frame
1786 * indicated by rxcp.
1787 */
10ef9ab4
SP
1788static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1789 struct be_rx_compl_info *rxcp)
6b7c5b94 1790{
6b7c5b94 1791 struct be_rx_page_info *page_info;
2e588f84
SP
1792 u16 i, j;
1793 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1794 u8 *start;
6b7c5b94 1795
0b0ef1d0 1796 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1797 start = page_address(page_info->page) + page_info->page_offset;
1798 prefetch(start);
1799
1800 /* Copy data in the first descriptor of this completion */
2e588f84 1801 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1802
6b7c5b94
SP
1803 skb->len = curr_frag_len;
1804 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1805 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1806 /* Complete packet has now been moved to data */
1807 put_page(page_info->page);
1808 skb->data_len = 0;
1809 skb->tail += curr_frag_len;
1810 } else {
ac1ae5f3
ED
1811 hdr_len = ETH_HLEN;
1812 memcpy(skb->data, start, hdr_len);
6b7c5b94 1813 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1814 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1815 skb_shinfo(skb)->frags[0].page_offset =
1816 page_info->page_offset + hdr_len;
748b539a
SP
1817 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1818 curr_frag_len - hdr_len);
6b7c5b94 1819 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1820 skb->truesize += rx_frag_size;
6b7c5b94
SP
1821 skb->tail += hdr_len;
1822 }
205859a2 1823 page_info->page = NULL;
6b7c5b94 1824
2e588f84
SP
1825 if (rxcp->pkt_size <= rx_frag_size) {
1826 BUG_ON(rxcp->num_rcvd != 1);
1827 return;
6b7c5b94
SP
1828 }
1829
1830 /* More frags present for this completion */
2e588f84
SP
1831 remaining = rxcp->pkt_size - curr_frag_len;
1832 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1833 page_info = get_rx_page_info(rxo);
2e588f84 1834 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1835
bd46cb6c
AK
1836 /* Coalesce all frags from the same physical page in one slot */
1837 if (page_info->page_offset == 0) {
1838 /* Fresh page */
1839 j++;
b061b39e 1840 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1841 skb_shinfo(skb)->frags[j].page_offset =
1842 page_info->page_offset;
9e903e08 1843 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1844 skb_shinfo(skb)->nr_frags++;
1845 } else {
1846 put_page(page_info->page);
1847 }
1848
9e903e08 1849 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1850 skb->len += curr_frag_len;
1851 skb->data_len += curr_frag_len;
bdb28a97 1852 skb->truesize += rx_frag_size;
2e588f84 1853 remaining -= curr_frag_len;
205859a2 1854 page_info->page = NULL;
6b7c5b94 1855 }
bd46cb6c 1856 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1857}
1858
5be93b9a 1859/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1860static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1861 struct be_rx_compl_info *rxcp)
6b7c5b94 1862{
10ef9ab4 1863 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1864 struct net_device *netdev = adapter->netdev;
6b7c5b94 1865 struct sk_buff *skb;
89420424 1866
bb349bb4 1867 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1868 if (unlikely(!skb)) {
ac124ff9 1869 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1870 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1871 return;
1872 }
1873
10ef9ab4 1874 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1875
6332c8d3 1876 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1877 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1878 else
1879 skb_checksum_none_assert(skb);
6b7c5b94 1880
6332c8d3 1881 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1882 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1883 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1884 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1885
b6c0e89d 1886 skb->csum_level = rxcp->tunneled;
6384a4d0 1887 skb_mark_napi_id(skb, napi);
6b7c5b94 1888
343e43c0 1889 if (rxcp->vlanf)
86a9bad3 1890 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1891
1892 netif_receive_skb(skb);
6b7c5b94
SP
1893}
1894
5be93b9a 1895/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1896static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1897 struct napi_struct *napi,
1898 struct be_rx_compl_info *rxcp)
6b7c5b94 1899{
10ef9ab4 1900 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1901 struct be_rx_page_info *page_info;
5be93b9a 1902 struct sk_buff *skb = NULL;
2e588f84
SP
1903 u16 remaining, curr_frag_len;
1904 u16 i, j;
3968fa1e 1905
10ef9ab4 1906 skb = napi_get_frags(napi);
5be93b9a 1907 if (!skb) {
10ef9ab4 1908 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1909 return;
1910 }
1911
2e588f84
SP
1912 remaining = rxcp->pkt_size;
1913 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1914 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1915
1916 curr_frag_len = min(remaining, rx_frag_size);
1917
bd46cb6c
AK
1918 /* Coalesce all frags from the same physical page in one slot */
1919 if (i == 0 || page_info->page_offset == 0) {
1920 /* First frag or Fresh page */
1921 j++;
b061b39e 1922 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1923 skb_shinfo(skb)->frags[j].page_offset =
1924 page_info->page_offset;
9e903e08 1925 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1926 } else {
1927 put_page(page_info->page);
1928 }
9e903e08 1929 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1930 skb->truesize += rx_frag_size;
bd46cb6c 1931 remaining -= curr_frag_len;
6b7c5b94
SP
1932 memset(page_info, 0, sizeof(*page_info));
1933 }
bd46cb6c 1934 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1935
5be93b9a 1936 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1937 skb->len = rxcp->pkt_size;
1938 skb->data_len = rxcp->pkt_size;
5be93b9a 1939 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1940 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1941 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1942 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1943
b6c0e89d 1944 skb->csum_level = rxcp->tunneled;
6384a4d0 1945 skb_mark_napi_id(skb, napi);
5be93b9a 1946
343e43c0 1947 if (rxcp->vlanf)
86a9bad3 1948 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1949
10ef9ab4 1950 napi_gro_frags(napi);
2e588f84
SP
1951}
1952
10ef9ab4
SP
1953static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1954 struct be_rx_compl_info *rxcp)
2e588f84 1955{
c3c18bc1
SP
1956 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1957 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1958 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1959 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1960 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1961 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1962 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1963 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1964 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1965 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1966 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1967 if (rxcp->vlanf) {
c3c18bc1
SP
1968 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1969 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1970 }
c3c18bc1 1971 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1972 rxcp->tunneled =
c3c18bc1 1973 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1974}
1975
10ef9ab4
SP
1976static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1977 struct be_rx_compl_info *rxcp)
2e588f84 1978{
c3c18bc1
SP
1979 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1980 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1981 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1982 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1983 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1984 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1985 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1986 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1987 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1988 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1989 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1990 if (rxcp->vlanf) {
c3c18bc1
SP
1991 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1992 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1993 }
c3c18bc1
SP
1994 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1995 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1996}
1997
1998static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1999{
2000 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2001 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2002 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2003
2e588f84
SP
2004 /* For checking the valid bit it is OK to use either definition, as the
2005 * valid bit is at the same position in both v0 and v1 Rx compl */
2006 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2007 return NULL;
6b7c5b94 2008
2e588f84
SP
2009 rmb();
2010 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2011
2e588f84 2012 if (adapter->be3_native)
10ef9ab4 2013 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2014 else
10ef9ab4 2015 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2016
e38b1706
SK
2017 if (rxcp->ip_frag)
2018 rxcp->l4_csum = 0;
2019
15d72184 2020 if (rxcp->vlanf) {
f93f160b
VV
2021 /* In QNQ modes, if qnq bit is not set, then the packet was
2022 * tagged only with the transparent outer vlan-tag and must
2023 * not be treated as a vlan packet by host
2024 */
2025 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2026 rxcp->vlanf = 0;
6b7c5b94 2027
15d72184 2028 if (!lancer_chip(adapter))
3c709f8f 2029 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2030
939cf306 2031 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2032 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2033 rxcp->vlanf = 0;
2034 }
2e588f84
SP
2035
2036 /* As the compl has been parsed, reset it; we won't touch it again */
2037 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2038
3abcdeda 2039 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2040 return rxcp;
2041}
2042
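/* Editor's sketch of the completion-ring handshake used by
 * be_rx_compl_get() above (generic consumer pattern, not driver code;
 * entry_valid(), parse() and mark_invalid() are hypothetical stand-ins).
 * The HW DMA-writes the entry and sets the valid bit last, so the driver
 * must test valid first and fence the remaining loads behind it:
 *
 *	if (!entry_valid(compl))
 *		return NULL;
 *	rmb();			// order entry loads after the valid check
 *	parse(compl);
 *	mark_invalid(compl);	// avoid re-consuming the slot on wrap
 */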
1829b086 2043static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2044{
6b7c5b94 2045 u32 order = get_order(size);
1829b086 2046
6b7c5b94 2047 if (order > 0)
1829b086
ED
2048 gfp |= __GFP_COMP;
2049 return alloc_pages(gfp, order);
6b7c5b94
SP
2050}
2051
2052/*
2053 * Allocate a page, split it to fragments of size rx_frag_size and post as
2054 * receive buffers to BE
2055 */
c30d7266 2056static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2057{
3abcdeda 2058 struct be_adapter *adapter = rxo->adapter;
26d92f92 2059 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2060 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2061 struct page *pagep = NULL;
ba42fad0 2062 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2063 struct be_eth_rx_d *rxd;
2064 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2065 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2066
3abcdeda 2067 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2068 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2069 if (!pagep) {
1829b086 2070 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2071 if (unlikely(!pagep)) {
ac124ff9 2072 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2073 break;
2074 }
ba42fad0
IV
2075 page_dmaaddr = dma_map_page(dev, pagep, 0,
2076 adapter->big_page_size,
2b7bcebf 2077 DMA_FROM_DEVICE);
ba42fad0
IV
2078 if (dma_mapping_error(dev, page_dmaaddr)) {
2079 put_page(pagep);
2080 pagep = NULL;
d3de1540 2081 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2082 break;
2083 }
e50287be 2084 page_offset = 0;
6b7c5b94
SP
2085 } else {
2086 get_page(pagep);
e50287be 2087 page_offset += rx_frag_size;
6b7c5b94 2088 }
e50287be 2089 page_info->page_offset = page_offset;
6b7c5b94 2090 page_info->page = pagep;
6b7c5b94
SP
2091
2092 rxd = queue_head_node(rxq);
e50287be 2093 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2094 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2095 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2096
2097 /* Any space left in the current big page for another frag? */
2098 if ((page_offset + rx_frag_size + rx_frag_size) >
2099 adapter->big_page_size) {
2100 pagep = NULL;
e50287be
SP
2101 page_info->last_frag = true;
2102 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2103 } else {
2104 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2105 }
26d92f92
SP
2106
2107 prev_page_info = page_info;
2108 queue_head_inc(rxq);
10ef9ab4 2109 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2110 }
e50287be
SP
2111
2112 /* Mark the last frag of a page when we break out of the above loop
2113 * with no more slots available in the RXQ
2114 */
2115 if (pagep) {
2116 prev_page_info->last_frag = true;
2117 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2118 }
6b7c5b94
SP
2119
2120 if (posted) {
6b7c5b94 2121 atomic_add(posted, &rxq->used);
6384a4d0
SP
2122 if (rxo->rx_post_starved)
2123 rxo->rx_post_starved = false;
c30d7266
AK
2124 do {
2125 notify = min(256u, posted);
2126 be_rxq_notify(adapter, rxq->id, notify);
2127 posted -= notify;
2128 } while (posted);
ea1dae11
SP
2129 } else if (atomic_read(&rxq->used) == 0) {
2130 /* Let be_worker replenish when memory is available */
3abcdeda 2131 rxo->rx_post_starved = true;
6b7c5b94 2132 }
6b7c5b94
SP
2133}
2134
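/* Editor's note (worked example, assuming 4K pages and the default
 * rx_frag_size of 2048): get_order(2048) == 0, so big_page_size =
 * (1 << 0) * 4096 = 4096 and each page yields two RX frags; the
 * "space left in the current big page" test above moves to a fresh
 * page after the second frag.
 */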
152ffe5b 2135static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2136{
152ffe5b
SB
2137 struct be_queue_info *tx_cq = &txo->cq;
2138 struct be_tx_compl_info *txcp = &txo->txcp;
2139 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2140
152ffe5b 2141 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2142 return NULL;
2143
152ffe5b 2144 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2145 rmb();
152ffe5b 2146 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2147
152ffe5b
SB
2148 txcp->status = GET_TX_COMPL_BITS(status, compl);
2149 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2150
152ffe5b 2151 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2152 queue_tail_inc(tx_cq);
2153 return txcp;
2154}
2155
3c8def97 2156static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2157 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2158{
5f07b3c5 2159 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2160 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2161 u16 frag_index, num_wrbs = 0;
2162 struct sk_buff *skb = NULL;
2163 bool unmap_skb_hdr = false;
a73b796e 2164 struct be_eth_wrb *wrb;
6b7c5b94 2165
ec43b1a6 2166 do {
5f07b3c5
SP
2167 if (sent_skbs[txq->tail]) {
2168 /* Free skb from prev req */
2169 if (skb)
2170 dev_consume_skb_any(skb);
2171 skb = sent_skbs[txq->tail];
2172 sent_skbs[txq->tail] = NULL;
2173 queue_tail_inc(txq); /* skip hdr wrb */
2174 num_wrbs++;
2175 unmap_skb_hdr = true;
2176 }
a73b796e 2177 wrb = queue_tail_node(txq);
5f07b3c5 2178 frag_index = txq->tail;
2b7bcebf 2179 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2180 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2181 unmap_skb_hdr = false;
6b7c5b94 2182 queue_tail_inc(txq);
5f07b3c5
SP
2183 num_wrbs++;
2184 } while (frag_index != last_index);
2185 dev_consume_skb_any(skb);
6b7c5b94 2186
4d586b82 2187 return num_wrbs;
6b7c5b94
SP
2188}
2189
10ef9ab4
SP
2190/* Return the number of events in the event queue */
2191static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2192{
10ef9ab4
SP
2193 struct be_eq_entry *eqe;
2194 int num = 0;
859b1e4e 2195
10ef9ab4
SP
2196 do {
2197 eqe = queue_tail_node(&eqo->q);
2198 if (eqe->evt == 0)
2199 break;
859b1e4e 2200
10ef9ab4
SP
2201 rmb();
2202 eqe->evt = 0;
2203 num++;
2204 queue_tail_inc(&eqo->q);
2205 } while (true);
2206
2207 return num;
859b1e4e
SP
2208}
2209
10ef9ab4
SP
2210/* Leaves the EQ in disarmed state */
2211static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2212{
10ef9ab4 2213 int num = events_get(eqo);
859b1e4e 2214
10ef9ab4 2215 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2216}
2217
10ef9ab4 2218static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2219{
2220 struct be_rx_page_info *page_info;
3abcdeda
SP
2221 struct be_queue_info *rxq = &rxo->q;
2222 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2223 struct be_rx_compl_info *rxcp;
d23e946c
SP
2224 struct be_adapter *adapter = rxo->adapter;
2225 int flush_wait = 0;
6b7c5b94 2226
d23e946c
SP
2227 /* Consume pending rx completions.
2228 * Wait for the flush completion (identified by zero num_rcvd)
2229 * to arrive. Notify CQ even when there are no more CQ entries
2230 * for HW to flush partially coalesced CQ entries.
2231 * In Lancer, there is no need to wait for flush compl.
2232 */
2233 for (;;) {
2234 rxcp = be_rx_compl_get(rxo);
ddf1169f 2235 if (!rxcp) {
d23e946c
SP
2236 if (lancer_chip(adapter))
2237 break;
2238
2239 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2240 dev_warn(&adapter->pdev->dev,
2241 "did not receive flush compl\n");
2242 break;
2243 }
2244 be_cq_notify(adapter, rx_cq->id, true, 0);
2245 mdelay(1);
2246 } else {
2247 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2248 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2249 if (rxcp->num_rcvd == 0)
2250 break;
2251 }
6b7c5b94
SP
2252 }
2253
d23e946c
SP
2254 /* After cleanup, leave the CQ in unarmed state */
2255 be_cq_notify(adapter, rx_cq->id, false, 0);
2256
2257 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2258 while (atomic_read(&rxq->used) > 0) {
2259 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2260 put_page(page_info->page);
2261 memset(page_info, 0, sizeof(*page_info));
2262 }
2263 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2264 rxq->tail = 0;
2265 rxq->head = 0;
6b7c5b94
SP
2266}
2267
0ae57bb3 2268static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2269{
5f07b3c5
SP
2270 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2271 struct device *dev = &adapter->pdev->dev;
152ffe5b 2272 struct be_tx_compl_info *txcp;
0ae57bb3 2273 struct be_queue_info *txq;
152ffe5b 2274 struct be_tx_obj *txo;
0ae57bb3 2275 int i, pending_txqs;
a8e9179a 2276
1a3d0717 2277 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2278 do {
0ae57bb3
SP
2279 pending_txqs = adapter->num_tx_qs;
2280
2281 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2282 cmpl = 0;
2283 num_wrbs = 0;
0ae57bb3 2284 txq = &txo->q;
152ffe5b
SB
2285 while ((txcp = be_tx_compl_get(txo))) {
2286 num_wrbs +=
2287 be_tx_compl_process(adapter, txo,
2288 txcp->end_index);
0ae57bb3
SP
2289 cmpl++;
2290 }
2291 if (cmpl) {
2292 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2293 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2294 timeo = 0;
0ae57bb3 2295 }
cf5671e6 2296 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2297 pending_txqs--;
a8e9179a
SP
2298 }
2299
1a3d0717 2300 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2301 break;
2302
2303 mdelay(1);
2304 } while (true);
2305
5f07b3c5 2306 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2307 for_all_tx_queues(adapter, txo, i) {
2308 txq = &txo->q;
0ae57bb3 2309
5f07b3c5
SP
2310 if (atomic_read(&txq->used)) {
2311 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2312 i, atomic_read(&txq->used));
2313 notified_idx = txq->tail;
0ae57bb3 2314 end_idx = txq->tail;
5f07b3c5
SP
2315 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2316 txq->len);
2317 /* Use the tx-compl process logic to handle requests
2318 * that were not sent to the HW.
2319 */
0ae57bb3
SP
2320 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2321 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2322 BUG_ON(atomic_read(&txq->used));
2323 txo->pend_wrb_cnt = 0;
2324 /* Since hw was never notified of these requests,
2325 * reset TXQ indices
2326 */
2327 txq->head = notified_idx;
2328 txq->tail = notified_idx;
0ae57bb3 2329 }
b03388d6 2330 }
6b7c5b94
SP
2331}
2332
10ef9ab4
SP
2333static void be_evt_queues_destroy(struct be_adapter *adapter)
2334{
2335 struct be_eq_obj *eqo;
2336 int i;
2337
2338 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2339 if (eqo->q.created) {
2340 be_eq_clean(eqo);
10ef9ab4 2341 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2342 napi_hash_del(&eqo->napi);
68d7bdcb 2343 netif_napi_del(&eqo->napi);
19d59aa7 2344 }
d658d98a 2345 free_cpumask_var(eqo->affinity_mask);
10ef9ab4
SP
2346 be_queue_free(adapter, &eqo->q);
2347 }
2348}
2349
2350static int be_evt_queues_create(struct be_adapter *adapter)
2351{
2352 struct be_queue_info *eq;
2353 struct be_eq_obj *eqo;
2632bafd 2354 struct be_aic_obj *aic;
10ef9ab4
SP
2355 int i, rc;
2356
92bf14ab
SP
2357 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2358 adapter->cfg_num_qs);
10ef9ab4
SP
2359
2360 for_all_evt_queues(adapter, eqo, i) {
d658d98a
PR
2361 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2362 return -ENOMEM;
2363 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2364 eqo->affinity_mask);
2365
68d7bdcb
SP
2366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2367 BE_NAPI_WEIGHT);
6384a4d0 2368 napi_hash_add(&eqo->napi);
2632bafd 2369 aic = &adapter->aic_obj[i];
10ef9ab4 2370 eqo->adapter = adapter;
10ef9ab4 2371 eqo->idx = i;
2632bafd
SP
2372 aic->max_eqd = BE_MAX_EQD;
2373 aic->enable = true;
10ef9ab4
SP
2374
2375 eq = &eqo->q;
2376 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2377 sizeof(struct be_eq_entry));
10ef9ab4
SP
2378 if (rc)
2379 return rc;
2380
f2f781a7 2381 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2382 if (rc)
2383 return rc;
2384 }
1cfafab9 2385 return 0;
10ef9ab4
SP
2386}
2387
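/* Editor's note on the affinity masks built above (a sketch of intent,
 * assuming cpumask_set_cpu_local_first()'s documented behaviour): for
 * EQ index i the mask receives the i-th CPU with CPUs local to the
 * device's NUMA node enumerated first, spreading EQs across local cores
 * before remote ones. be_msix_register() later hands the mask to
 * irq_set_affinity_hint(), which is what exposes the hint to irqbalance:
 *
 *	vec = be_msix_vec_get(adapter, eqo);
 *	irq_set_affinity_hint(vec, eqo->affinity_mask);
 */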
5fb379ee
SP
2388static void be_mcc_queues_destroy(struct be_adapter *adapter)
2389{
2390 struct be_queue_info *q;
5fb379ee 2391
8788fdc2 2392 q = &adapter->mcc_obj.q;
5fb379ee 2393 if (q->created)
8788fdc2 2394 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2395 be_queue_free(adapter, q);
2396
8788fdc2 2397 q = &adapter->mcc_obj.cq;
5fb379ee 2398 if (q->created)
8788fdc2 2399 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2400 be_queue_free(adapter, q);
2401}
2402
2403/* Must be called only after TX qs are created as MCC shares TX EQ */
2404static int be_mcc_queues_create(struct be_adapter *adapter)
2405{
2406 struct be_queue_info *q, *cq;
5fb379ee 2407
8788fdc2 2408 cq = &adapter->mcc_obj.cq;
5fb379ee 2409 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2410 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2411 goto err;
2412
10ef9ab4
SP
2413 /* Use the default EQ for MCC completions */
2414 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2415 goto mcc_cq_free;
2416
8788fdc2 2417 q = &adapter->mcc_obj.q;
5fb379ee
SP
2418 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2419 goto mcc_cq_destroy;
2420
8788fdc2 2421 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2422 goto mcc_q_free;
2423
2424 return 0;
2425
2426mcc_q_free:
2427 be_queue_free(adapter, q);
2428mcc_cq_destroy:
8788fdc2 2429 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2430mcc_cq_free:
2431 be_queue_free(adapter, cq);
2432err:
2433 return -1;
2434}
2435
6b7c5b94
SP
2436static void be_tx_queues_destroy(struct be_adapter *adapter)
2437{
2438 struct be_queue_info *q;
3c8def97
SP
2439 struct be_tx_obj *txo;
2440 u8 i;
6b7c5b94 2441
3c8def97
SP
2442 for_all_tx_queues(adapter, txo, i) {
2443 q = &txo->q;
2444 if (q->created)
2445 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2446 be_queue_free(adapter, q);
6b7c5b94 2447
3c8def97
SP
2448 q = &txo->cq;
2449 if (q->created)
2450 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2451 be_queue_free(adapter, q);
2452 }
6b7c5b94
SP
2453}
2454
7707133c 2455static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2456{
10ef9ab4 2457 struct be_queue_info *cq, *eq;
3c8def97 2458 struct be_tx_obj *txo;
92bf14ab 2459 int status, i;
6b7c5b94 2460
92bf14ab 2461 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2462
10ef9ab4
SP
2463 for_all_tx_queues(adapter, txo, i) {
2464 cq = &txo->cq;
2465 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2466 sizeof(struct be_eth_tx_compl));
2467 if (status)
2468 return status;
3c8def97 2469
827da44c
JS
2470 u64_stats_init(&txo->stats.sync);
2471 u64_stats_init(&txo->stats.sync_compl);
2472
10ef9ab4
SP
2473 /* If num_evt_qs is less than num_tx_qs, then more than
2474 * one txq shares an eq
2475 */
2476 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2477 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2478 if (status)
2479 return status;
6b7c5b94 2480
10ef9ab4
SP
2481 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2482 sizeof(struct be_eth_wrb));
2483 if (status)
2484 return status;
6b7c5b94 2485
94d73aaa 2486 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2487 if (status)
2488 return status;
3c8def97 2489 }
6b7c5b94 2490
d379142b
SP
2491 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2492 adapter->num_tx_qs);
10ef9ab4 2493 return 0;
6b7c5b94
SP
2494}
2495
10ef9ab4 2496static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2497{
2498 struct be_queue_info *q;
3abcdeda
SP
2499 struct be_rx_obj *rxo;
2500 int i;
2501
2502 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2503 q = &rxo->cq;
2504 if (q->created)
2505 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2506 be_queue_free(adapter, q);
ac6a0c4a
SP
2507 }
2508}
2509
10ef9ab4 2510static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2511{
10ef9ab4 2512 struct be_queue_info *eq, *cq;
3abcdeda
SP
2513 struct be_rx_obj *rxo;
2514 int rc, i;
6b7c5b94 2515
92bf14ab 2516 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2517 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2518
71bb8bd0
VV
2519 /* We'll use RSS only if at least 2 RSS rings are supported. */
2520 if (adapter->num_rss_qs <= 1)
2521 adapter->num_rss_qs = 0;
2522
2523 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2524
2525 /* When the interface is not capable of RSS rings (and there is no
2526 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2527 */
71bb8bd0
VV
2528 if (adapter->num_rx_qs == 0)
2529 adapter->num_rx_qs = 1;
92bf14ab 2530
6b7c5b94 2531 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2532 for_all_rx_queues(adapter, rxo, i) {
2533 rxo->adapter = adapter;
3abcdeda
SP
2534 cq = &rxo->cq;
2535 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2536 sizeof(struct be_eth_rx_compl));
3abcdeda 2537 if (rc)
10ef9ab4 2538 return rc;
3abcdeda 2539
827da44c 2540 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2541 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2542 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2543 if (rc)
10ef9ab4 2544 return rc;
3abcdeda 2545 }
6b7c5b94 2546
d379142b 2547 dev_info(&adapter->pdev->dev,
71bb8bd0 2548 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2549 return 0;
b628bde2
SP
2550}
2551
6b7c5b94
SP
2552static irqreturn_t be_intx(int irq, void *dev)
2553{
e49cc34f
SP
2554 struct be_eq_obj *eqo = dev;
2555 struct be_adapter *adapter = eqo->adapter;
2556 int num_evts = 0;
6b7c5b94 2557
d0b9cec3
SP
2558 /* IRQ is not expected when NAPI is scheduled as the EQ
2559 * will not be armed.
2560 * But, this can happen on Lancer INTx where it takes
2561 * a while to de-assert INTx or in BE2 where occasionally
2562 * an interrupt may be raised even when EQ is unarmed.
2563 * If NAPI is already scheduled, then counting & notifying
2564 * events will orphan them.
e49cc34f 2565 */
d0b9cec3 2566 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2567 num_evts = events_get(eqo);
d0b9cec3
SP
2568 __napi_schedule(&eqo->napi);
2569 if (num_evts)
2570 eqo->spurious_intr = 0;
2571 }
2572 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2573
d0b9cec3
SP
2574 /* Return IRQ_HANDLED only for the first spurious intr
2575 * after a valid intr to stop the kernel from branding
2576 * this irq as a bad one!
e49cc34f 2577 */
d0b9cec3
SP
2578 if (num_evts || eqo->spurious_intr++ == 0)
2579 return IRQ_HANDLED;
2580 else
2581 return IRQ_NONE;
6b7c5b94
SP
2582}
2583
10ef9ab4 2584static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2585{
10ef9ab4 2586 struct be_eq_obj *eqo = dev;
6b7c5b94 2587
0b545a62
SP
2588 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2589 napi_schedule(&eqo->napi);
6b7c5b94
SP
2590 return IRQ_HANDLED;
2591}
2592
2e588f84 2593static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2594{
e38b1706 2595 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2596}
2597
10ef9ab4 2598static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2599 int budget, int polling)
6b7c5b94 2600{
3abcdeda
SP
2601 struct be_adapter *adapter = rxo->adapter;
2602 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2603 struct be_rx_compl_info *rxcp;
6b7c5b94 2604 u32 work_done;
c30d7266 2605 u32 frags_consumed = 0;
6b7c5b94
SP
2606
2607 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2608 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2609 if (!rxcp)
2610 break;
2611
12004ae9
SP
2612 /* Is it a flush compl that has no data */
2613 if (unlikely(rxcp->num_rcvd == 0))
2614 goto loop_continue;
2615
2616 /* Discard compl with partial DMA Lancer B0 */
2617 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2618 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2619 goto loop_continue;
2620 }
2621
2622 /* On BE drop pkts that arrive due to imperfect filtering in
2623 * promiscuous mode on some SKUs
2624 */
2625 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2626 !lancer_chip(adapter))) {
10ef9ab4 2627 be_rx_compl_discard(rxo, rxcp);
12004ae9 2628 goto loop_continue;
64642811 2629 }
009dd872 2630
6384a4d0
SP
2631 /* Don't do gro when we're busy_polling */
2632 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2633 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2634 else
6384a4d0
SP
2635 be_rx_compl_process(rxo, napi, rxcp);
2636
12004ae9 2637loop_continue:
c30d7266 2638 frags_consumed += rxcp->num_rcvd;
2e588f84 2639 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2640 }
2641
10ef9ab4
SP
2642 if (work_done) {
2643 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2644
6384a4d0
SP
2645 /* When an rx-obj gets into post_starved state, just
2646 * let be_worker do the posting.
2647 */
2648 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2649 !rxo->rx_post_starved)
c30d7266
AK
2650 be_post_rx_frags(rxo, GFP_ATOMIC,
2651 max_t(u32, MAX_RX_POST,
2652 frags_consumed));
6b7c5b94 2653 }
10ef9ab4 2654
6b7c5b94
SP
2655 return work_done;
2656}
2657
152ffe5b 2658static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2659{
2660 switch (status) {
2661 case BE_TX_COMP_HDR_PARSE_ERR:
2662 tx_stats(txo)->tx_hdr_parse_err++;
2663 break;
2664 case BE_TX_COMP_NDMA_ERR:
2665 tx_stats(txo)->tx_dma_err++;
2666 break;
2667 case BE_TX_COMP_ACL_ERR:
2668 tx_stats(txo)->tx_spoof_check_err++;
2669 break;
2670 }
2671}
2672
152ffe5b 2673static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2674{
2675 switch (status) {
2676 case LANCER_TX_COMP_LSO_ERR:
2677 tx_stats(txo)->tx_tso_err++;
2678 break;
2679 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2680 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2681 tx_stats(txo)->tx_spoof_check_err++;
2682 break;
2683 case LANCER_TX_COMP_QINQ_ERR:
2684 tx_stats(txo)->tx_qinq_err++;
2685 break;
2686 case LANCER_TX_COMP_PARITY_ERR:
2687 tx_stats(txo)->tx_internal_parity_err++;
2688 break;
2689 case LANCER_TX_COMP_DMA_ERR:
2690 tx_stats(txo)->tx_dma_err++;
2691 break;
2692 }
2693}
2694
c8f64615
SP
2695static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2696 int idx)
6b7c5b94 2697{
c8f64615 2698 int num_wrbs = 0, work_done = 0;
152ffe5b 2699 struct be_tx_compl_info *txcp;
c8f64615 2700
152ffe5b
SB
2701 while ((txcp = be_tx_compl_get(txo))) {
2702 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2703 work_done++;
3c8def97 2704
152ffe5b 2705 if (txcp->status) {
512bb8a2 2706 if (lancer_chip(adapter))
152ffe5b 2707 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2708 else
152ffe5b 2709 be_update_tx_err(txo, txcp->status);
512bb8a2 2710 }
10ef9ab4 2711 }
6b7c5b94 2712
10ef9ab4
SP
2713 if (work_done) {
2714 be_cq_notify(adapter, txo->cq.id, true, work_done);
2715 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2716
10ef9ab4
SP
2717 /* As Tx wrbs have been freed up, wake up netdev queue
2718 * if it was stopped due to lack of tx wrbs. */
2719 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2720 be_can_txq_wake(txo)) {
10ef9ab4 2721 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2722 }
10ef9ab4
SP
2723
2724 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2725 tx_stats(txo)->tx_compl += work_done;
2726 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2727 }
10ef9ab4 2728}
6b7c5b94 2729
f7062ee5
SP
2730#ifdef CONFIG_NET_RX_BUSY_POLL
2731static inline bool be_lock_napi(struct be_eq_obj *eqo)
2732{
2733 bool status = true;
2734
2735 spin_lock(&eqo->lock); /* BH is already disabled */
2736 if (eqo->state & BE_EQ_LOCKED) {
2737 WARN_ON(eqo->state & BE_EQ_NAPI);
2738 eqo->state |= BE_EQ_NAPI_YIELD;
2739 status = false;
2740 } else {
2741 eqo->state = BE_EQ_NAPI;
2742 }
2743 spin_unlock(&eqo->lock);
2744 return status;
2745}
2746
2747static inline void be_unlock_napi(struct be_eq_obj *eqo)
2748{
2749 spin_lock(&eqo->lock); /* BH is already disabled */
2750
2751 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2752 eqo->state = BE_EQ_IDLE;
2753
2754 spin_unlock(&eqo->lock);
2755}
2756
2757static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2758{
2759 bool status = true;
2760
2761 spin_lock_bh(&eqo->lock);
2762 if (eqo->state & BE_EQ_LOCKED) {
2763 eqo->state |= BE_EQ_POLL_YIELD;
2764 status = false;
2765 } else {
2766 eqo->state |= BE_EQ_POLL;
2767 }
2768 spin_unlock_bh(&eqo->lock);
2769 return status;
2770}
2771
2772static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2773{
2774 spin_lock_bh(&eqo->lock);
2775
2776 WARN_ON(eqo->state & (BE_EQ_NAPI));
2777 eqo->state = BE_EQ_IDLE;
2778
2779 spin_unlock_bh(&eqo->lock);
2780}
2781
2782static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2783{
2784 spin_lock_init(&eqo->lock);
2785 eqo->state = BE_EQ_IDLE;
2786}
2787
2788static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2789{
2790 local_bh_disable();
2791
2792 /* It's enough to just acquire napi lock on the eqo to stop
2793 * be_busy_poll() from processing any queues.
2794 */
2795 while (!be_lock_napi(eqo))
2796 mdelay(1);
2797
2798 local_bh_enable();
2799}
2800
2801#else /* CONFIG_NET_RX_BUSY_POLL */
2802
2803static inline bool be_lock_napi(struct be_eq_obj *eqo)
2804{
2805 return true;
2806}
2807
2808static inline void be_unlock_napi(struct be_eq_obj *eqo)
2809{
2810}
2811
2812static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2813{
2814 return false;
2815}
2816
2817static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2818{
2819}
2820
2821static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2822{
2823}
2824
2825static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2826{
2827}
2828#endif /* CONFIG_NET_RX_BUSY_POLL */
2829
68d7bdcb 2830int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2831{
2832 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2833 struct be_adapter *adapter = eqo->adapter;
0b545a62 2834 int max_work = 0, work, i, num_evts;
6384a4d0 2835 struct be_rx_obj *rxo;
a4906ea0 2836 struct be_tx_obj *txo;
f31e50a8 2837
0b545a62
SP
2838 num_evts = events_get(eqo);
2839
a4906ea0
SP
2840 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2841 be_process_tx(adapter, txo, i);
f31e50a8 2842
6384a4d0
SP
2843 if (be_lock_napi(eqo)) {
2844 /* This loop will iterate twice for EQ0 in which
2845 * completions of the last RXQ (default one) are also processed.
2846 * For other EQs the loop iterates only once.
2847 */
2848 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2849 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2850 max_work = max(work, max_work);
2851 }
2852 be_unlock_napi(eqo);
2853 } else {
2854 max_work = budget;
10ef9ab4 2855 }
6b7c5b94 2856
10ef9ab4
SP
2857 if (is_mcc_eqo(eqo))
2858 be_process_mcc(adapter);
93c86700 2859
10ef9ab4
SP
2860 if (max_work < budget) {
2861 napi_complete(napi);
0b545a62 2862 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2863 } else {
2864 /* As we'll continue in polling mode, count and clear events */
0b545a62 2865 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2866 }
10ef9ab4 2867 return max_work;
6b7c5b94
SP
2868}
2869
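/* Editor's note on the NAPI contract followed in be_poll() above: when
 * the work done falls short of the budget, napi_complete() is called and
 * the EQ is re-armed so a fresh interrupt can fire; otherwise the EQ is
 * left unarmed (events merely counted and cleared) and the kernel will
 * invoke be_poll() again without another interrupt.
 */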
6384a4d0
SP
2870#ifdef CONFIG_NET_RX_BUSY_POLL
2871static int be_busy_poll(struct napi_struct *napi)
2872{
2873 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2874 struct be_adapter *adapter = eqo->adapter;
2875 struct be_rx_obj *rxo;
2876 int i, work = 0;
2877
2878 if (!be_lock_busy_poll(eqo))
2879 return LL_FLUSH_BUSY;
2880
2881 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2882 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2883 if (work)
2884 break;
2885 }
2886
2887 be_unlock_busy_poll(eqo);
2888 return work;
2889}
2890#endif
2891
f67ef7ba 2892void be_detect_error(struct be_adapter *adapter)
7c185276 2893{
e1cfb67a
PR
2894 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2895 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2896 u32 i;
eb0eecc1
SK
2897 bool error_detected = false;
2898 struct device *dev = &adapter->pdev->dev;
2899 struct net_device *netdev = adapter->netdev;
7c185276 2900
d23e946c 2901 if (be_hw_error(adapter))
72f02485
SP
2902 return;
2903
e1cfb67a
PR
2904 if (lancer_chip(adapter)) {
2905 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2906 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2907 sliport_err1 = ioread32(adapter->db +
748b539a 2908 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2909 sliport_err2 = ioread32(adapter->db +
748b539a 2910 SLIPORT_ERROR2_OFFSET);
eb0eecc1 2911 adapter->hw_error = true;
d0e1b319 2912 error_detected = true;
eb0eecc1
SK
2913 /* Do not log error messages if it's a FW reset */
2914 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2915 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2916 dev_info(dev, "Firmware update in progress\n");
2917 } else {
eb0eecc1
SK
2918 dev_err(dev, "Error detected in the card\n");
2919 dev_err(dev, "ERR: sliport status 0x%x\n",
2920 sliport_status);
2921 dev_err(dev, "ERR: sliport error1 0x%x\n",
2922 sliport_err1);
2923 dev_err(dev, "ERR: sliport error2 0x%x\n",
2924 sliport_err2);
2925 }
e1cfb67a
PR
2926 }
2927 } else {
25848c90
SR
2928 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2929 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2930 ue_lo_mask = ioread32(adapter->pcicfg +
2931 PCICFG_UE_STATUS_LOW_MASK);
2932 ue_hi_mask = ioread32(adapter->pcicfg +
2933 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 2934
f67ef7ba
PR
2935 ue_lo = (ue_lo & ~ue_lo_mask);
2936 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2937
eb0eecc1
SK
2938 /* On certain platforms BE hardware can indicate spurious UEs.
2939 * Allow HW to stop working completely in case of a real UE;
2940 * hence hw_error is not set merely on detecting a UE.
2941 */
f67ef7ba 2942
eb0eecc1
SK
2943 if (ue_lo || ue_hi) {
2944 error_detected = true;
2945 dev_err(dev,
2946 "Unrecoverable Error detected in the adapter");
2947 dev_err(dev, "Please reboot server to recover");
2948 if (skyhawk_chip(adapter))
2949 adapter->hw_error = true;
2950 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2951 if (ue_lo & 1)
2952 dev_err(dev, "UE: %s bit set\n",
2953 ue_status_low_desc[i]);
2954 }
2955 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2956 if (ue_hi & 1)
2957 dev_err(dev, "UE: %s bit set\n",
2958 ue_status_hi_desc[i]);
2959 }
7c185276
AK
2960 }
2961 }
eb0eecc1
SK
2962 if (error_detected)
2963 netif_carrier_off(netdev);
7c185276
AK
2964}
2965
8d56ff11
SP
2966static void be_msix_disable(struct be_adapter *adapter)
2967{
ac6a0c4a 2968 if (msix_enabled(adapter)) {
8d56ff11 2969 pci_disable_msix(adapter->pdev);
ac6a0c4a 2970 adapter->num_msix_vec = 0;
68d7bdcb 2971 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2972 }
2973}
2974
c2bba3df 2975static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2976{
7dc4c064 2977 int i, num_vec;
d379142b 2978 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2979
92bf14ab
SP
2980 /* If RoCE is supported, program the max number of NIC vectors that
2981 * may be configured via set-channels, along with vectors needed for
2982 * RoCE. Else, just program the number we'll use initially.
2983 */
2984 if (be_roce_supported(adapter))
2985 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2986 2 * num_online_cpus());
2987 else
2988 num_vec = adapter->cfg_num_qs;
3abcdeda 2989
ac6a0c4a 2990 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2991 adapter->msix_entries[i].entry = i;
2992
7dc4c064
AG
2993 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2994 MIN_MSIX_VECTORS, num_vec);
2995 if (num_vec < 0)
2996 goto fail;
92bf14ab 2997
92bf14ab
SP
2998 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2999 adapter->num_msix_roce_vec = num_vec / 2;
3000 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3001 adapter->num_msix_roce_vec);
3002 }
3003
3004 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3005
3006 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3007 adapter->num_msix_vec);
c2bba3df 3008 return 0;
7dc4c064
AG
3009
3010fail:
3011 dev_warn(dev, "MSIx enable failed\n");
3012
3013 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3014 if (!be_physfn(adapter))
3015 return num_vec;
3016 return 0;
6b7c5b94
SP
3017}
3018
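/* Editor's note (worked example, assuming be_max_eqs() == 8 on a 16-CPU
 * host with RoCE supported): num_vec = min(2 * 8, 2 * 16) = 16; after
 * pci_enable_msix_range() succeeds, RoCE is given 16 / 2 = 8 vectors
 * and the NIC keeps the remaining 8.
 */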
fe6d2a38 3019static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3020 struct be_eq_obj *eqo)
b628bde2 3021{
f2f781a7 3022 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3023}
6b7c5b94 3024
b628bde2
SP
3025static int be_msix_register(struct be_adapter *adapter)
3026{
10ef9ab4
SP
3027 struct net_device *netdev = adapter->netdev;
3028 struct be_eq_obj *eqo;
3029 int status, i, vec;
6b7c5b94 3030
10ef9ab4
SP
3031 for_all_evt_queues(adapter, eqo, i) {
3032 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3033 vec = be_msix_vec_get(adapter, eqo);
3034 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3035 if (status)
3036 goto err_msix;
d658d98a
PR
3037
3038 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3039 }
b628bde2 3040
6b7c5b94 3041 return 0;
3abcdeda 3042err_msix:
10ef9ab4
SP
3043 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3044 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3045 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3046 status);
ac6a0c4a 3047 be_msix_disable(adapter);
6b7c5b94
SP
3048 return status;
3049}
3050
3051static int be_irq_register(struct be_adapter *adapter)
3052{
3053 struct net_device *netdev = adapter->netdev;
3054 int status;
3055
ac6a0c4a 3056 if (msix_enabled(adapter)) {
6b7c5b94
SP
3057 status = be_msix_register(adapter);
3058 if (status == 0)
3059 goto done;
ba343c77
SB
3060 /* INTx is not supported for VF */
3061 if (!be_physfn(adapter))
3062 return status;
6b7c5b94
SP
3063 }
3064
e49cc34f 3065 /* INTx: only the first EQ is used */
6b7c5b94
SP
3066 netdev->irq = adapter->pdev->irq;
3067 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3068 &adapter->eq_obj[0]);
6b7c5b94
SP
3069 if (status) {
3070 dev_err(&adapter->pdev->dev,
3071 "INTx request IRQ failed - err %d\n", status);
3072 return status;
3073 }
3074done:
3075 adapter->isr_registered = true;
3076 return 0;
3077}
3078
3079static void be_irq_unregister(struct be_adapter *adapter)
3080{
3081 struct net_device *netdev = adapter->netdev;
10ef9ab4 3082 struct be_eq_obj *eqo;
d658d98a 3083 int i, vec;
6b7c5b94
SP
3084
3085 if (!adapter->isr_registered)
3086 return;
3087
3088 /* INTx */
ac6a0c4a 3089 if (!msix_enabled(adapter)) {
e49cc34f 3090 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3091 goto done;
3092 }
3093
3094 /* MSIx */
d658d98a
PR
3095 for_all_evt_queues(adapter, eqo, i) {
3096 vec = be_msix_vec_get(adapter, eqo);
3097 irq_set_affinity_hint(vec, NULL);
3098 free_irq(vec, eqo);
3099 }
3abcdeda 3100
6b7c5b94
SP
3101done:
3102 adapter->isr_registered = false;
6b7c5b94
SP
3103}
3104
10ef9ab4 3105static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3106{
3107 struct be_queue_info *q;
3108 struct be_rx_obj *rxo;
3109 int i;
3110
3111 for_all_rx_queues(adapter, rxo, i) {
3112 q = &rxo->q;
3113 if (q->created) {
3114 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3115 be_rx_cq_clean(rxo);
482c9e79 3116 }
10ef9ab4 3117 be_queue_free(adapter, q);
482c9e79
SP
3118 }
3119}
3120
889cd4b2
SP
3121static int be_close(struct net_device *netdev)
3122{
3123 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3124 struct be_eq_obj *eqo;
3125 int i;
889cd4b2 3126
e1ad8e33
KA
3127 /* This protection is needed as be_close() may be called even when the
3128 * adapter is in cleared state (after eeh perm failure)
3129 */
3130 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3131 return 0;
3132
045508a8
PP
3133 be_roce_dev_close(adapter);
3134
dff345c5
IV
3135 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3136 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3137 napi_disable(&eqo->napi);
6384a4d0
SP
3138 be_disable_busy_poll(eqo);
3139 }
71237b6f 3140 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3141 }
a323d9bf
SP
3142
3143 be_async_mcc_disable(adapter);
3144
3145 /* Wait for all pending tx completions to arrive so that
3146 * all tx skbs are freed.
3147 */
fba87559 3148 netif_tx_disable(netdev);
6e1f9975 3149 be_tx_compl_clean(adapter);
a323d9bf
SP
3150
3151 be_rx_qs_destroy(adapter);
f66b7cfd 3152 be_clear_uc_list(adapter);
d11a347d 3153
a323d9bf 3154 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3155 if (msix_enabled(adapter))
3156 synchronize_irq(be_msix_vec_get(adapter, eqo));
3157 else
3158 synchronize_irq(netdev->irq);
3159 be_eq_clean(eqo);
63fcb27f
PR
3160 }
3161
889cd4b2
SP
3162 be_irq_unregister(adapter);
3163
482c9e79
SP
3164 return 0;
3165}
3166
10ef9ab4 3167static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3168{
1dcf7b1c
ED
3169 struct rss_info *rss = &adapter->rss_info;
3170 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3171 struct be_rx_obj *rxo;
e9008ee9 3172 int rc, i, j;
482c9e79
SP
3173
3174 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3175 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3176 sizeof(struct be_eth_rx_d));
3177 if (rc)
3178 return rc;
3179 }
3180
71bb8bd0
VV
3181 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3182 rxo = default_rxo(adapter);
3183 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3184 rx_frag_size, adapter->if_handle,
3185 false, &rxo->rss_id);
3186 if (rc)
3187 return rc;
3188 }
10ef9ab4
SP
3189
3190 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3191 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3192 rx_frag_size, adapter->if_handle,
3193 true, &rxo->rss_id);
482c9e79
SP
3194 if (rc)
3195 return rc;
3196 }
3197
3198 if (be_multi_rxq(adapter)) {
71bb8bd0 3199 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3200 for_all_rss_queues(adapter, rxo, i) {
e2557877 3201 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3202 break;
e2557877
VD
3203 rss->rsstable[j + i] = rxo->rss_id;
3204 rss->rss_queue[j + i] = i;
e9008ee9
PR
3205 }
3206 }
e2557877
VD
3207 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3208 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3209
3210 if (!BEx_chip(adapter))
e2557877
VD
3211 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3212 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3213 } else {
3214 /* Disable RSS, if only default RX Q is created */
e2557877 3215 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3216 }
594ad54a 3217
1dcf7b1c 3218 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3219 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3220 128, rss_key);
da1388d6 3221 if (rc) {
e2557877 3222 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3223 return rc;
482c9e79
SP
3224 }
3225
1dcf7b1c 3226 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3227
482c9e79 3228 /* First time posting */
10ef9ab4 3229 for_all_rx_queues(adapter, rxo, i)
c30d7266 3230 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3231 return 0;
3232}
3233
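/* Editor's note (worked example for the indirection fill above, assuming
 * four RSS rings and the 128-entry table implied by the
 * be_cmd_rss_config() call): the nested loops write the rings'
 * HW-assigned rss_ids in order, repeating every 4 slots, so a flow
 * hashed to table slot n is steered to ring (n % 4).
 */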
6b7c5b94
SP
3234static int be_open(struct net_device *netdev)
3235{
3236 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3237 struct be_eq_obj *eqo;
3abcdeda 3238 struct be_rx_obj *rxo;
10ef9ab4 3239 struct be_tx_obj *txo;
b236916a 3240 u8 link_status;
3abcdeda 3241 int status, i;
5fb379ee 3242
10ef9ab4 3243 status = be_rx_qs_create(adapter);
482c9e79
SP
3244 if (status)
3245 goto err;
3246
c2bba3df
SK
3247 status = be_irq_register(adapter);
3248 if (status)
3249 goto err;
5fb379ee 3250
10ef9ab4 3251 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3252 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3253
10ef9ab4
SP
3254 for_all_tx_queues(adapter, txo, i)
3255 be_cq_notify(adapter, txo->cq.id, true, 0);
3256
7a1e9b20
SP
3257 be_async_mcc_enable(adapter);
3258
10ef9ab4
SP
3259 for_all_evt_queues(adapter, eqo, i) {
3260 napi_enable(&eqo->napi);
6384a4d0 3261 be_enable_busy_poll(eqo);
4cad9f3b 3262 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3263 }
04d3d624 3264 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3265
323ff71e 3266 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3267 if (!status)
3268 be_link_status_update(adapter, link_status);
3269
fba87559 3270 netif_tx_start_all_queues(netdev);
045508a8 3271 be_roce_dev_open(adapter);
c9c47142 3272
c5abe7c0 3273#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3274 if (skyhawk_chip(adapter))
3275 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3276#endif
3277
889cd4b2
SP
3278 return 0;
3279err:
3280 be_close(adapter->netdev);
3281 return -EIO;
5fb379ee
SP
3282}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
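
/* Worked example (hypothetical values): if the PF MAC is
 * 00:90:fa:12:34:56 and jhash() returns 0x00a1b2c3, the seed VF MAC
 * becomes 00:90:fa:a1:b2:c3; the PF's OUI fills the first three bytes
 * and the low 24 bits of the hash fill the last three. VF n is then
 * given seed + n in mac[5] (see be_vf_eth_addr_config() below).
 */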

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf + 1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_cancel_err_detection(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->be_err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	if (adapter->pmac_id) {
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[0], 0);
		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and its VFs.
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If the number of VFs requested is 8 less than the max
		 * supported, assign 8 queue pairs to the PF and divide the
		 * remaining resources evenly among the VFs.
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
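
/* Worked example of the math above (hypothetical pool sizes): with
 * max_rss_qs = 64 and be_max_vfs() = 64, a request for 16 VFs is more
 * than 8 below the max, so 8 queue pairs stay with the PF and each VF
 * gets (64 - 8) / 16 = 3. A request for 60 VFs skips the PF reservation
 * and each VF gets 64 / 60 = 1. Requests of MAX_RSS_IFACES VFs or more
 * always fall back to 1 queue pair per VF.
 */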

static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;

	en_flags &= cap_flags;

	return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */

static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}
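
/* The same mapping as a table (bits tested in the order above):
 *
 *	VNIC_MODE + QNQ_MODE	-> vNIC1
 *	QNQ_MODE only		-> FLEX10
 *	VNIC_MODE only		-> vNIC2
 *	UMC_ENABLED only	-> UMC
 *	none of the above	-> MC_NONE
 */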

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}

static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}

static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
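
/* Note the strict rebuild order above: close -> cancel worker -> drop
 * MSI-X (only when RoCE isn't sharing vectors) -> destroy queues ->
 * re-enable MSI-X -> recreate queues -> reschedule worker -> reopen.
 * This is presumably what allows queue counts to be resized at runtime,
 * e.g. when the number of VFs is changed through sysfs.
 */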

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
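
/* E.g. a (hypothetical) fw_ver string of "10.2.229.23" yields 10, while
 * a string that does not begin with a decimal number yields 0.
 * be_setup() below uses this to warn about pre-4.0 firmware on BE2.
 */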

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
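
/* The image is pushed in 32KB chunks: every chunk except the last is
 * sent with a SAVE op (the FW just buffers it) and the final chunk uses
 * a FLASH op, which commits the buffered image to flash. E.g. a 100KB
 * section goes out as three 32KB SAVEs plus one 4KB FLASH.
 */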

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}
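
/* Newer UFI sections carry an explicit optype, which is returned
 * directly; the switch above is only a fallback that derives an optype
 * from the image type for legacy sections whose optype field is left
 * as 0xFFFF.
 */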

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
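
/* The download above is a two-phase protocol: the image is streamed to
 * LANCER_FW_DOWNLOAD_LOCATION in 32KB writes, then a zero-length write
 * at the final offset commits it. change_status then tells us whether a
 * FW reset (issued here) or a full server reboot is needed to activate
 * the new image.
 */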

#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
								SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
								BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}

/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	int ufi_type = be_get_ufi_type(adapter, fhdr);

	switch (ufi_type) {
	case SH_P2_UFI:
		return skyhawk_chip(adapter);
	case SH_UFI:
		return (skyhawk_chip(adapter) &&
			adapter->asic_rev < ASIC_REV_P2);
	case BE3R_UFI:
		return BE3_chip(adapter);
	case BE3_UFI:
		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
	case BE2_UFI:
		return BE2_chip(adapter);
	default:
		return false;
	}
}
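
/* The compatibility matrix implied by the two functions above:
 *
 *	UFI type	may be flashed onto
 *	BE2_UFI		BE2 only
 *	BE3_UFI		BE3 with asic_rev < B0 only
 *	BE3R_UFI	any BE3 (including B0)
 *	SH_UFI		Skyhawk with asic_rev < P2 only
 *	SH_P2_UFI	any Skyhawk (including P2)
 */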

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
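
/* From userspace this is exercised through the standard bridge netlink
 * API, e.g. with iproute2 (assuming the interface is named eth0):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 */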

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif
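
/* In short, be_features_check() keeps tunnel offloads only for frames
 * that parse as UDP-encapsulated Ethernet (ETH_P_TEB) with exactly a
 * udphdr plus vxlanhdr between the outer transport header and the inner
 * MAC header, i.e. well-formed VxLAN. Anything else (GRE, NVGRE, ...)
 * falls back to software checksumming and GSO.
 */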

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
5026
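/* Set up netdev feature flags and ops. Flags in hw_features can be
 * toggled by the user via ethtool; VLAN RX/filter are added only to
 * netdev->features and so stay always-on.
 */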
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

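/* Quiesce the device before a reset or suspend: detach the netdev and
 * close it under rtnl_lock so the stack stops using it, then tear down
 * adapter resources with be_clear().
 */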
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

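/* Re-create adapter resources after an error-induced cleanup and
 * re-open the interface if it was running before the error.
 */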
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

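/* Periodic (1 second) housekeeping: fire stats commands, read the die
 * temperature on the PF, replenish starved RX queues and adapt EQ
 * interrupt delays.
 */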
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

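/* The doorbell BAR: Lancer and VFs expose doorbells on BAR 0, while
 * PFs on the other chip families use BAR 4.
 */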
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

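/* Map the CSR, doorbell and PCICFG BARs. Which BARs exist depends on
 * the chip family and on whether we are the PF or a VF, so the SLI
 * interface register is decoded first.
 */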
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

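	/* PTR_ALIGN below carves a 16-byte-aligned mailbox out of the
	 * over-sized allocation; the extra 16 bytes allocated above
	 * guarantee that both the virtual and DMA addresses can be
	 * aligned.
	 */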
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

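/* Human-readable name of the multi-channel mode the function is
 * provisioned in (e.g. FLEX10, UMC), or "" when multi-channel is off.
 */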
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

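/* PCI probe: enable the device, set the DMA mask, map BARs, allocate
 * driver state, bring the adapter up via be_setup() and register the
 * netdev. Error paths unwind in strict reverse order of setup.
 */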
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

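/* EEH/AER error_detected callback: quiesce the adapter on the first
 * error and tell the PCI error-recovery core whether a slot reset can
 * be attempted or the device must be disconnected.
 */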
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

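/* slot_reset callback: runs after the PCI slot has been reset. Bring
 * the device back to D0, restore config space and wait for the FW to
 * become ready before declaring the slot recovered.
 */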
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

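/* Invoked when the user writes to the sriov_numvfs sysfs attribute,
 * e.g.: echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
 * A count of 0 disables SR-IOV; a non-zero count (re)provisions VFs.
 */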
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max number of VFs.
	 * The user may request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so
	 * that each VF will have access to a greater number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);