/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

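/* Sizing illustration (not from the original source): a call like
 * be_queue_alloc(adapter, q, 1024, 16) maps one 1024 * 16 = 16KB
 * DMA-coherent region for the whole ring; entries are then addressed as
 * q->dma_mem.va + index * entry_size. The numbers are examples only.
 */
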
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
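/* Worked example (illustrative, not from the original source): if *acc is
 * 0x0001FFF0, the last HW reading was 0xFFF0. A new reading val = 0x0005 is
 * below lo(*acc), so the 16-bit HW counter wrapped: newacc = 0x00010000 +
 * 0x0005 + 65536 = 0x00020005, keeping the accumulated 32-bit total
 * monotonic across the wrap.
 */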

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
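/* Example (illustrative): an skb with linear header data plus two page frags
 * needs 1 (hdr WRB) + 1 (linear buffer) + 2 (frags) = 4 WRBs; a fully
 * non-linear skb (skb_headlen() == 0) omits the linear-buffer WRB.
 */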

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}
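/* Worked example (illustrative; assumes adapter->recommended_prio is kept
 * pre-shifted into the PCP bits): with VLAN_PRIO_SHIFT == 13 and
 * VLAN_PRIO_MASK == 0xe000, tag 0x6005 carries VID 5 and priority 3. If
 * bit (1 << 3) is clear in adapter->vlan_prio_bmap, the result is
 * (0x6005 & ~0xe000) | adapter->recommended_prio.
 */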

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

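/* Note (illustrative): be_is_txq_full() keeps BE_MAX_TX_FRAG_COUNT entries of
 * headroom so a worst-case fragmented packet cannot overrun the ring, while
 * be_can_txq_wake() waits for the ring to drain below half; the gap between
 * the two thresholds gives the subqueue stop/wake hysteresis.
 */
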
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
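/* Note (illustrative): per the dummy-WRB handling above, non-Lancer chips are
 * always notified of an even number of WRBs. E.g. with pend_wrb_cnt == 3, one
 * all-zero WRB is appended (4 total) and num_wrb in the last header is bumped
 * from last_req_wrb_cnt to last_req_wrb_cnt + 1 to match.
 */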

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

ed616689
SC
1559static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1560 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1561{
1562 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1563 struct device *dev = &adapter->pdev->dev;
1564 int percent_rate, status = 0;
1565 u16 link_speed = 0;
1566 u8 link_status;
e1d18735 1567
11ac75ed 1568 if (!sriov_enabled(adapter))
e1d18735
AK
1569 return -EPERM;
1570
94f434c2 1571 if (vf >= adapter->num_vfs)
e1d18735
AK
1572 return -EINVAL;
1573
ed616689
SC
1574 if (min_tx_rate)
1575 return -EINVAL;
1576
0f77ba73
RN
1577 if (!max_tx_rate)
1578 goto config_qos;
1579
1580 status = be_cmd_link_status_query(adapter, &link_speed,
1581 &link_status, 0);
1582 if (status)
1583 goto err;
1584
1585 if (!link_status) {
1586 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1587 status = -ENETDOWN;
0f77ba73
RN
1588 goto err;
1589 }
1590
1591 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1592 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1593 link_speed);
1594 status = -EINVAL;
1595 goto err;
1596 }
1597
1598 /* On Skyhawk the QOS setting must be done only as a % value */
1599 percent_rate = link_speed / 100;
1600 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1601 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1602 percent_rate);
1603 status = -EINVAL;
1604 goto err;
94f434c2 1605 }
e1d18735 1606
0f77ba73
RN
1607config_qos:
1608 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1609 if (status)
0f77ba73
RN
1610 goto err;
1611
1612 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1613 return 0;
1614
1615err:
1616 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1617 max_tx_rate, vf);
abccf23e 1618 return be_cmd_status(status);
e1d18735 1619}
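/* Worked example (illustrative numbers): on a 10GbE link,
 * be_cmd_link_status_query() reports link_speed = 10000 Mbps, so
 * percent_rate = 100. A request like
 * "ip link set <pf-dev> vf 0 max_tx_rate 2500" passes the range check
 * (100 <= 2500 <= 10000) and, on Skyhawk, the multiple-of-percent_rate
 * check (2500 % 100 == 0), capping the VF at 2500 Mbps. min_tx_rate is
 * unsupported and must be 0.
 */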
e2fb1afa 1620
bdce2ad7
SR
1621static int be_set_vf_link_state(struct net_device *netdev, int vf,
1622 int link_state)
1623{
1624 struct be_adapter *adapter = netdev_priv(netdev);
1625 int status;
1626
1627 if (!sriov_enabled(adapter))
1628 return -EPERM;
1629
1630 if (vf >= adapter->num_vfs)
1631 return -EINVAL;
1632
1633 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1634 if (status) {
1635 dev_err(&adapter->pdev->dev,
1636 "Link state change on VF %d failed: %#x\n", vf, status);
1637 return be_cmd_status(status);
1638 }
bdce2ad7 1639
abccf23e
KA
1640 adapter->vf_cfg[vf].plink_tracking = link_state;
1641
1642 return 0;
bdce2ad7 1643}
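/* Illustrative usage: the admin toggles a VF's logical link from the PF,
 * e.g. "ip link set <pf-dev> vf 0 state disable" (or enable/auto). The
 * IFLA_VF_LINK_STATE_* value lands here as link_state and is pushed to
 * FW by be_cmd_set_logical_link_config().
 */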
e1d18735 1644
2632bafd
SP
1645static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1646 ulong now)
6b7c5b94 1647{
2632bafd
SP
1648 aic->rx_pkts_prev = rx_pkts;
1649 aic->tx_reqs_prev = tx_pkts;
1650 aic->jiffies = now;
1651}
ac124ff9 1652
2632bafd
SP
1653static void be_eqd_update(struct be_adapter *adapter)
1654{
1655 struct be_set_eqd set_eqd[MAX_EVT_QS];
1656 int eqd, i, num = 0, start;
1657 struct be_aic_obj *aic;
1658 struct be_eq_obj *eqo;
1659 struct be_rx_obj *rxo;
1660 struct be_tx_obj *txo;
1661 u64 rx_pkts, tx_pkts;
1662 ulong now;
1663 u32 pps, delta;
10ef9ab4 1664
2632bafd
SP
1665 for_all_evt_queues(adapter, eqo, i) {
1666 aic = &adapter->aic_obj[eqo->idx];
1667 if (!aic->enable) {
1668 if (aic->jiffies)
1669 aic->jiffies = 0;
1670 eqd = aic->et_eqd;
1671 goto modify_eqd;
1672 }
6b7c5b94 1673
2632bafd
SP
1674 rxo = &adapter->rx_obj[eqo->idx];
1675 do {
57a7744e 1676 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1677 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1678 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1679
2632bafd
SP
1680 txo = &adapter->tx_obj[eqo->idx];
1681 do {
57a7744e 1682 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1683 tx_pkts = txo->stats.tx_reqs;
57a7744e 1684 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1685
2632bafd
SP
1686 /* Skip, if wrapped around or first calculation */
1687 now = jiffies;
1688 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1689 rx_pkts < aic->rx_pkts_prev ||
1690 tx_pkts < aic->tx_reqs_prev) {
1691 be_aic_update(aic, rx_pkts, tx_pkts, now);
1692 continue;
1693 }
1694
1695 delta = jiffies_to_msecs(now - aic->jiffies);
1696 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1697 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1698 eqd = (pps / 15000) << 2;
10ef9ab4 1699
2632bafd
SP
1700 if (eqd < 8)
1701 eqd = 0;
1702 eqd = min_t(u32, eqd, aic->max_eqd);
1703 eqd = max_t(u32, eqd, aic->min_eqd);
1704
1705 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1706modify_eqd:
2632bafd
SP
1707 if (eqd != aic->prev_eqd) {
1708 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1709 set_eqd[num].eq_id = eqo->q.id;
1710 aic->prev_eqd = eqd;
1711 num++;
1712 }
ac124ff9 1713 }
2632bafd
SP
1714
1715 if (num)
1716 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1717}
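/* Worked example of the adaptive EQ-delay math above (illustrative
 * numbers): if 1000ms passed since the last sample and the queues moved
 * 200K rx pkts and 100K tx reqs, pps = 200000 + 100000 = 300000 and
 * eqd = (300000 / 15000) << 2 = 80. After clamping to [min_eqd, max_eqd],
 * the value programmed is delay_multiplier = 80 * 65 / 100 = 52. Any eqd
 * below 8 is forced to 0 (no interrupt delay).
 */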
1718
3abcdeda 1719static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1720 struct be_rx_compl_info *rxcp)
4097f663 1721{
ac124ff9 1722 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1723
ab1594e9 1724 u64_stats_update_begin(&stats->sync);
3abcdeda 1725 stats->rx_compl++;
2e588f84 1726 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1727 stats->rx_pkts++;
2e588f84 1728 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1729 stats->rx_mcast_pkts++;
2e588f84 1730 if (rxcp->err)
ac124ff9 1731 stats->rx_compl_err++;
ab1594e9 1732 u64_stats_update_end(&stats->sync);
4097f663
SP
1733}
1734
2e588f84 1735static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1736{
19fad86f 1737 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1738 * Also ignore ipcksm for ipv6 pkts
1739 */
2e588f84 1740 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1741 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1742}
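/* Example (illustrative): a clean TCP/IPv4 completion (tcpf, l4_csum and
 * ip_csum set, err clear) passes; for IPv6 the ip_csum bit is ignored as
 * IPv6 has no header checksum; non-TCP/UDP packets never pass since
 * their L4 checksum indication is unreliable.
 */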
1743
0b0ef1d0 1744static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1745{
10ef9ab4 1746 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1747 struct be_rx_page_info *rx_page_info;
3abcdeda 1748 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1749 u16 frag_idx = rxq->tail;
6b7c5b94 1750
3abcdeda 1751 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1752 BUG_ON(!rx_page_info->page);
1753
e50287be 1754 if (rx_page_info->last_frag) {
2b7bcebf
IV
1755 dma_unmap_page(&adapter->pdev->dev,
1756 dma_unmap_addr(rx_page_info, bus),
1757 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1758 rx_page_info->last_frag = false;
1759 } else {
1760 dma_sync_single_for_cpu(&adapter->pdev->dev,
1761 dma_unmap_addr(rx_page_info, bus),
1762 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1763 }
6b7c5b94 1764
0b0ef1d0 1765 queue_tail_inc(rxq);
6b7c5b94
SP
1766 atomic_dec(&rxq->used);
1767 return rx_page_info;
1768}
1769
 1770/* Throw away the data in the Rx completion */
10ef9ab4
SP
1771static void be_rx_compl_discard(struct be_rx_obj *rxo,
1772 struct be_rx_compl_info *rxcp)
6b7c5b94 1773{
6b7c5b94 1774 struct be_rx_page_info *page_info;
2e588f84 1775 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1776
e80d9da6 1777 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1778 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1779 put_page(page_info->page);
1780 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1781 }
1782}
1783
1784/*
1785 * skb_fill_rx_data forms a complete skb for an ether frame
1786 * indicated by rxcp.
1787 */
10ef9ab4
SP
1788static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1789 struct be_rx_compl_info *rxcp)
6b7c5b94 1790{
6b7c5b94 1791 struct be_rx_page_info *page_info;
2e588f84
SP
1792 u16 i, j;
1793 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1794 u8 *start;
6b7c5b94 1795
0b0ef1d0 1796 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1797 start = page_address(page_info->page) + page_info->page_offset;
1798 prefetch(start);
1799
1800 /* Copy data in the first descriptor of this completion */
2e588f84 1801 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1802
6b7c5b94
SP
1803 skb->len = curr_frag_len;
1804 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1805 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1806 /* Complete packet has now been moved to data */
1807 put_page(page_info->page);
1808 skb->data_len = 0;
1809 skb->tail += curr_frag_len;
1810 } else {
ac1ae5f3
ED
1811 hdr_len = ETH_HLEN;
1812 memcpy(skb->data, start, hdr_len);
6b7c5b94 1813 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1814 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1815 skb_shinfo(skb)->frags[0].page_offset =
1816 page_info->page_offset + hdr_len;
748b539a
SP
1817 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1818 curr_frag_len - hdr_len);
6b7c5b94 1819 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1820 skb->truesize += rx_frag_size;
6b7c5b94
SP
1821 skb->tail += hdr_len;
1822 }
205859a2 1823 page_info->page = NULL;
6b7c5b94 1824
2e588f84
SP
1825 if (rxcp->pkt_size <= rx_frag_size) {
1826 BUG_ON(rxcp->num_rcvd != 1);
1827 return;
6b7c5b94
SP
1828 }
1829
1830 /* More frags present for this completion */
2e588f84
SP
1831 remaining = rxcp->pkt_size - curr_frag_len;
1832 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1833 page_info = get_rx_page_info(rxo);
2e588f84 1834 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1835
bd46cb6c
AK
1836 /* Coalesce all frags from the same physical page in one slot */
1837 if (page_info->page_offset == 0) {
1838 /* Fresh page */
1839 j++;
b061b39e 1840 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1841 skb_shinfo(skb)->frags[j].page_offset =
1842 page_info->page_offset;
9e903e08 1843 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1844 skb_shinfo(skb)->nr_frags++;
1845 } else {
1846 put_page(page_info->page);
1847 }
1848
9e903e08 1849 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1850 skb->len += curr_frag_len;
1851 skb->data_len += curr_frag_len;
bdb28a97 1852 skb->truesize += rx_frag_size;
2e588f84 1853 remaining -= curr_frag_len;
205859a2 1854 page_info->page = NULL;
6b7c5b94 1855 }
bd46cb6c 1856 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1857}
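/* Worked example (illustrative, rx_frag_size = 2048): a 3000-byte frame
 * arrives as two fragments. From the first 2048-byte fragment, ETH_HLEN
 * (14) bytes are copied into the skb linear area and the remaining 2034
 * become frags[0]. The trailing 952-byte fragment is merged into frags[0]
 * when it is the other half of the same physical page (page_offset != 0),
 * or starts frags[1] on a fresh page.
 */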
1858
5be93b9a 1859/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1860static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1861 struct be_rx_compl_info *rxcp)
6b7c5b94 1862{
10ef9ab4 1863 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1864 struct net_device *netdev = adapter->netdev;
6b7c5b94 1865 struct sk_buff *skb;
89420424 1866
bb349bb4 1867 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1868 if (unlikely(!skb)) {
ac124ff9 1869 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1870 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1871 return;
1872 }
1873
10ef9ab4 1874 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1875
6332c8d3 1876 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1877 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1878 else
1879 skb_checksum_none_assert(skb);
6b7c5b94 1880
6332c8d3 1881 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1882 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1883 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1884 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1885
b6c0e89d 1886 skb->csum_level = rxcp->tunneled;
6384a4d0 1887 skb_mark_napi_id(skb, napi);
6b7c5b94 1888
343e43c0 1889 if (rxcp->vlanf)
86a9bad3 1890 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1891
1892 netif_receive_skb(skb);
6b7c5b94
SP
1893}
1894
5be93b9a 1895/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1896static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1897 struct napi_struct *napi,
1898 struct be_rx_compl_info *rxcp)
6b7c5b94 1899{
10ef9ab4 1900 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1901 struct be_rx_page_info *page_info;
5be93b9a 1902 struct sk_buff *skb = NULL;
2e588f84
SP
1903 u16 remaining, curr_frag_len;
1904 u16 i, j;
3968fa1e 1905
10ef9ab4 1906 skb = napi_get_frags(napi);
5be93b9a 1907 if (!skb) {
10ef9ab4 1908 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1909 return;
1910 }
1911
2e588f84
SP
1912 remaining = rxcp->pkt_size;
1913 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1914 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1915
1916 curr_frag_len = min(remaining, rx_frag_size);
1917
bd46cb6c
AK
1918 /* Coalesce all frags from the same physical page in one slot */
1919 if (i == 0 || page_info->page_offset == 0) {
1920 /* First frag or Fresh page */
1921 j++;
b061b39e 1922 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1923 skb_shinfo(skb)->frags[j].page_offset =
1924 page_info->page_offset;
9e903e08 1925 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1926 } else {
1927 put_page(page_info->page);
1928 }
9e903e08 1929 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1930 skb->truesize += rx_frag_size;
bd46cb6c 1931 remaining -= curr_frag_len;
6b7c5b94
SP
1932 memset(page_info, 0, sizeof(*page_info));
1933 }
bd46cb6c 1934 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1935
5be93b9a 1936 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1937 skb->len = rxcp->pkt_size;
1938 skb->data_len = rxcp->pkt_size;
5be93b9a 1939 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1940 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1941 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1942 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1943
b6c0e89d 1944 skb->csum_level = rxcp->tunneled;
6384a4d0 1945 skb_mark_napi_id(skb, napi);
5be93b9a 1946
343e43c0 1947 if (rxcp->vlanf)
86a9bad3 1948 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1949
10ef9ab4 1950 napi_gro_frags(napi);
2e588f84
SP
1951}
1952
10ef9ab4
SP
1953static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1954 struct be_rx_compl_info *rxcp)
2e588f84 1955{
c3c18bc1
SP
1956 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1957 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1958 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1959 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1960 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1961 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1962 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1963 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1964 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1965 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1966 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1967 if (rxcp->vlanf) {
c3c18bc1
SP
1968 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1969 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1970 }
c3c18bc1 1971 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1972 rxcp->tunneled =
c3c18bc1 1973 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1974}
1975
10ef9ab4
SP
1976static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1977 struct be_rx_compl_info *rxcp)
2e588f84 1978{
c3c18bc1
SP
1979 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1980 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1981 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1982 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1983 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1984 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1985 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1986 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1987 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1988 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1989 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1990 if (rxcp->vlanf) {
c3c18bc1
SP
1991 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1992 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1993 }
c3c18bc1
SP
1994 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1995 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1996}
1997
1998static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1999{
2000 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2001 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2002 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2003
2e588f84
SP
2004 /* For checking the valid bit it is Ok to use either definition as the
2005 * valid bit is at the same position in both v0 and v1 Rx compl */
2006 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2007 return NULL;
6b7c5b94 2008
2e588f84
SP
2009 rmb();
2010 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2011
2e588f84 2012 if (adapter->be3_native)
10ef9ab4 2013 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2014 else
10ef9ab4 2015 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2016
e38b1706
SK
2017 if (rxcp->ip_frag)
2018 rxcp->l4_csum = 0;
2019
15d72184 2020 if (rxcp->vlanf) {
f93f160b
VV
2021 /* In QNQ modes, if qnq bit is not set, then the packet was
2022 * tagged only with the transparent outer vlan-tag and must
2023 * not be treated as a vlan packet by host
2024 */
2025 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2026 rxcp->vlanf = 0;
6b7c5b94 2027
15d72184 2028 if (!lancer_chip(adapter))
3c709f8f 2029 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2030
939cf306 2031 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2032 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2033 rxcp->vlanf = 0;
2034 }
2e588f84
SP
2035
 2036	/* As the compl has been parsed, reset it; we won't touch it again */
2037 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2038
3abcdeda 2039 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2040 return rxcp;
2041}
2042
1829b086 2043static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2044{
6b7c5b94 2045 u32 order = get_order(size);
1829b086 2046
6b7c5b94 2047 if (order > 0)
1829b086
ED
2048 gfp |= __GFP_COMP;
2049 return alloc_pages(gfp, order);
6b7c5b94
SP
2050}
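/* Example (illustrative, assuming 4KB PAGE_SIZE): with the default
 * rx_frag_size of 2048, big_page_size works out to 4096 and
 * get_order(4096) == 0, so a plain page is returned. With
 * rx_frag_size = 8192 the order is 1 and __GFP_COMP asks for a compound
 * page so per-fragment get_page()/put_page() refcounting stays correct.
 */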
2051
2052/*
 2053 * Allocate a page, split it into fragments of size rx_frag_size and post as
2054 * receive buffers to BE
2055 */
c30d7266 2056static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2057{
3abcdeda 2058 struct be_adapter *adapter = rxo->adapter;
26d92f92 2059 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2060 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2061 struct page *pagep = NULL;
ba42fad0 2062 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2063 struct be_eth_rx_d *rxd;
2064 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2065 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2066
3abcdeda 2067 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2068 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2069 if (!pagep) {
1829b086 2070 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2071 if (unlikely(!pagep)) {
ac124ff9 2072 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2073 break;
2074 }
ba42fad0
IV
2075 page_dmaaddr = dma_map_page(dev, pagep, 0,
2076 adapter->big_page_size,
2b7bcebf 2077 DMA_FROM_DEVICE);
ba42fad0
IV
2078 if (dma_mapping_error(dev, page_dmaaddr)) {
2079 put_page(pagep);
2080 pagep = NULL;
d3de1540 2081 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2082 break;
2083 }
e50287be 2084 page_offset = 0;
6b7c5b94
SP
2085 } else {
2086 get_page(pagep);
e50287be 2087 page_offset += rx_frag_size;
6b7c5b94 2088 }
e50287be 2089 page_info->page_offset = page_offset;
6b7c5b94 2090 page_info->page = pagep;
6b7c5b94
SP
2091
2092 rxd = queue_head_node(rxq);
e50287be 2093 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2094 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2095 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2096
2097 /* Any space left in the current big page for another frag? */
2098 if ((page_offset + rx_frag_size + rx_frag_size) >
2099 adapter->big_page_size) {
2100 pagep = NULL;
e50287be
SP
2101 page_info->last_frag = true;
2102 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2103 } else {
2104 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2105 }
26d92f92
SP
2106
2107 prev_page_info = page_info;
2108 queue_head_inc(rxq);
10ef9ab4 2109 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2110 }
e50287be
SP
2111
2112 /* Mark the last frag of a page when we break out of the above loop
2113 * with no more slots available in the RXQ
2114 */
2115 if (pagep) {
2116 prev_page_info->last_frag = true;
2117 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2118 }
6b7c5b94
SP
2119
2120 if (posted) {
6b7c5b94 2121 atomic_add(posted, &rxq->used);
6384a4d0
SP
2122 if (rxo->rx_post_starved)
2123 rxo->rx_post_starved = false;
c30d7266 2124 do {
69304cc9 2125 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2126 be_rxq_notify(adapter, rxq->id, notify);
2127 posted -= notify;
2128 } while (posted);
ea1dae11
SP
2129 } else if (atomic_read(&rxq->used) == 0) {
2130 /* Let be_worker replenish when memory is available */
3abcdeda 2131 rxo->rx_post_starved = true;
6b7c5b94 2132 }
6b7c5b94
SP
2133}
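/* Illustrative walk-through (4KB page, rx_frag_size = 2048): each page
 * yields two RX descriptors. The first fragment is posted at page_offset
 * 0; as 0 + 2*2048 does not exceed big_page_size, the page is kept and
 * the second fragment is posted at offset 2048, which exhausts the page
 * and marks it last_frag (holding the full-page DMA unmap address).
 * Doorbells are then rung in chunks of at most MAX_NUM_POST_ERX_DB frags.
 */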
2134
152ffe5b 2135static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2136{
152ffe5b
SB
2137 struct be_queue_info *tx_cq = &txo->cq;
2138 struct be_tx_compl_info *txcp = &txo->txcp;
2139 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2140
152ffe5b 2141 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2142 return NULL;
2143
152ffe5b 2144 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2145 rmb();
152ffe5b 2146 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2147
152ffe5b
SB
2148 txcp->status = GET_TX_COMPL_BITS(status, compl);
2149 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2150
152ffe5b 2151 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2152 queue_tail_inc(tx_cq);
2153 return txcp;
2154}
2155
3c8def97 2156static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2157 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2158{
5f07b3c5 2159 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2160 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2161 u16 frag_index, num_wrbs = 0;
2162 struct sk_buff *skb = NULL;
2163 bool unmap_skb_hdr = false;
a73b796e 2164 struct be_eth_wrb *wrb;
6b7c5b94 2165
ec43b1a6 2166 do {
5f07b3c5
SP
2167 if (sent_skbs[txq->tail]) {
2168 /* Free skb from prev req */
2169 if (skb)
2170 dev_consume_skb_any(skb);
2171 skb = sent_skbs[txq->tail];
2172 sent_skbs[txq->tail] = NULL;
2173 queue_tail_inc(txq); /* skip hdr wrb */
2174 num_wrbs++;
2175 unmap_skb_hdr = true;
2176 }
a73b796e 2177 wrb = queue_tail_node(txq);
5f07b3c5 2178 frag_index = txq->tail;
2b7bcebf 2179 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2180 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2181 unmap_skb_hdr = false;
6b7c5b94 2182 queue_tail_inc(txq);
5f07b3c5
SP
2183 num_wrbs++;
2184 } while (frag_index != last_index);
2185 dev_consume_skb_any(skb);
6b7c5b94 2186
4d586b82 2187 return num_wrbs;
6b7c5b94
SP
2188}
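/* Example (illustrative): a TX request posted as one header wrb plus two
 * fragment wrbs completes with last_index at the second fragment.
 * sent_skbs[] is non-NULL only at the header slot, so the walk above
 * frees the skb exactly once, unmaps both fragments, and returns
 * num_wrbs = 3 for the caller to credit back to the queue.
 */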
2189
10ef9ab4
SP
2190/* Return the number of events in the event queue */
2191static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2192{
10ef9ab4
SP
2193 struct be_eq_entry *eqe;
2194 int num = 0;
859b1e4e 2195
10ef9ab4
SP
2196 do {
2197 eqe = queue_tail_node(&eqo->q);
2198 if (eqe->evt == 0)
2199 break;
859b1e4e 2200
10ef9ab4
SP
2201 rmb();
2202 eqe->evt = 0;
2203 num++;
2204 queue_tail_inc(&eqo->q);
2205 } while (true);
2206
2207 return num;
859b1e4e
SP
2208}
2209
10ef9ab4
SP
 2210/* Leaves the EQ in disarmed state */
2211static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2212{
10ef9ab4 2213 int num = events_get(eqo);
859b1e4e 2214
10ef9ab4 2215 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2216}
2217
10ef9ab4 2218static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2219{
2220 struct be_rx_page_info *page_info;
3abcdeda
SP
2221 struct be_queue_info *rxq = &rxo->q;
2222 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2223 struct be_rx_compl_info *rxcp;
d23e946c
SP
2224 struct be_adapter *adapter = rxo->adapter;
2225 int flush_wait = 0;
6b7c5b94 2226
d23e946c
SP
2227 /* Consume pending rx completions.
2228 * Wait for the flush completion (identified by zero num_rcvd)
2229 * to arrive. Notify CQ even when there are no more CQ entries
2230 * for HW to flush partially coalesced CQ entries.
2231 * In Lancer, there is no need to wait for flush compl.
2232 */
2233 for (;;) {
2234 rxcp = be_rx_compl_get(rxo);
ddf1169f 2235 if (!rxcp) {
d23e946c
SP
2236 if (lancer_chip(adapter))
2237 break;
2238
2239 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2240 dev_warn(&adapter->pdev->dev,
2241 "did not receive flush compl\n");
2242 break;
2243 }
2244 be_cq_notify(adapter, rx_cq->id, true, 0);
2245 mdelay(1);
2246 } else {
2247 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2248 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2249 if (rxcp->num_rcvd == 0)
2250 break;
2251 }
6b7c5b94
SP
2252 }
2253
d23e946c
SP
2254 /* After cleanup, leave the CQ in unarmed state */
2255 be_cq_notify(adapter, rx_cq->id, false, 0);
2256
2257 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2258 while (atomic_read(&rxq->used) > 0) {
2259 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2260 put_page(page_info->page);
2261 memset(page_info, 0, sizeof(*page_info));
2262 }
2263 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2264 rxq->tail = 0;
2265 rxq->head = 0;
6b7c5b94
SP
2266}
2267
0ae57bb3 2268static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2269{
5f07b3c5
SP
2270 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2271 struct device *dev = &adapter->pdev->dev;
152ffe5b 2272 struct be_tx_compl_info *txcp;
0ae57bb3 2273 struct be_queue_info *txq;
152ffe5b 2274 struct be_tx_obj *txo;
0ae57bb3 2275 int i, pending_txqs;
a8e9179a 2276
1a3d0717 2277 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2278 do {
0ae57bb3
SP
2279 pending_txqs = adapter->num_tx_qs;
2280
2281 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2282 cmpl = 0;
2283 num_wrbs = 0;
0ae57bb3 2284 txq = &txo->q;
152ffe5b
SB
2285 while ((txcp = be_tx_compl_get(txo))) {
2286 num_wrbs +=
2287 be_tx_compl_process(adapter, txo,
2288 txcp->end_index);
0ae57bb3
SP
2289 cmpl++;
2290 }
2291 if (cmpl) {
2292 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2293 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2294 timeo = 0;
0ae57bb3 2295 }
cf5671e6 2296 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2297 pending_txqs--;
a8e9179a
SP
2298 }
2299
1a3d0717 2300 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2301 break;
2302
2303 mdelay(1);
2304 } while (true);
2305
5f07b3c5 2306 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2307 for_all_tx_queues(adapter, txo, i) {
2308 txq = &txo->q;
0ae57bb3 2309
5f07b3c5
SP
2310 if (atomic_read(&txq->used)) {
2311 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2312 i, atomic_read(&txq->used));
2313 notified_idx = txq->tail;
0ae57bb3 2314 end_idx = txq->tail;
5f07b3c5
SP
2315 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2316 txq->len);
2317 /* Use the tx-compl process logic to handle requests
2318 * that were not sent to the HW.
2319 */
0ae57bb3
SP
2320 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2321 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2322 BUG_ON(atomic_read(&txq->used));
2323 txo->pend_wrb_cnt = 0;
2324 /* Since hw was never notified of these requests,
2325 * reset TXQ indices
2326 */
2327 txq->head = notified_idx;
2328 txq->tail = notified_idx;
0ae57bb3 2329 }
b03388d6 2330 }
6b7c5b94
SP
2331}
2332
10ef9ab4
SP
2333static void be_evt_queues_destroy(struct be_adapter *adapter)
2334{
2335 struct be_eq_obj *eqo;
2336 int i;
2337
2338 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2339 if (eqo->q.created) {
2340 be_eq_clean(eqo);
10ef9ab4 2341 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2342 napi_hash_del(&eqo->napi);
68d7bdcb 2343 netif_napi_del(&eqo->napi);
19d59aa7 2344 }
d658d98a 2345 free_cpumask_var(eqo->affinity_mask);
10ef9ab4
SP
2346 be_queue_free(adapter, &eqo->q);
2347 }
2348}
2349
2350static int be_evt_queues_create(struct be_adapter *adapter)
2351{
2352 struct be_queue_info *eq;
2353 struct be_eq_obj *eqo;
2632bafd 2354 struct be_aic_obj *aic;
10ef9ab4
SP
2355 int i, rc;
2356
92bf14ab
SP
2357 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2358 adapter->cfg_num_qs);
10ef9ab4
SP
2359
2360 for_all_evt_queues(adapter, eqo, i) {
d658d98a
PR
2361 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2362 return -ENOMEM;
2363 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2364 eqo->affinity_mask);
2365
68d7bdcb
SP
2366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2367 BE_NAPI_WEIGHT);
6384a4d0 2368 napi_hash_add(&eqo->napi);
2632bafd 2369 aic = &adapter->aic_obj[i];
10ef9ab4 2370 eqo->adapter = adapter;
10ef9ab4 2371 eqo->idx = i;
2632bafd
SP
2372 aic->max_eqd = BE_MAX_EQD;
2373 aic->enable = true;
10ef9ab4
SP
2374
2375 eq = &eqo->q;
2376 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2377 sizeof(struct be_eq_entry));
10ef9ab4
SP
2378 if (rc)
2379 return rc;
2380
f2f781a7 2381 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2382 if (rc)
2383 return rc;
2384 }
1cfafab9 2385 return 0;
10ef9ab4
SP
2386}
2387
5fb379ee
SP
2388static void be_mcc_queues_destroy(struct be_adapter *adapter)
2389{
2390 struct be_queue_info *q;
5fb379ee 2391
8788fdc2 2392 q = &adapter->mcc_obj.q;
5fb379ee 2393 if (q->created)
8788fdc2 2394 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2395 be_queue_free(adapter, q);
2396
8788fdc2 2397 q = &adapter->mcc_obj.cq;
5fb379ee 2398 if (q->created)
8788fdc2 2399 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2400 be_queue_free(adapter, q);
2401}
2402
2403/* Must be called only after TX qs are created as MCC shares TX EQ */
2404static int be_mcc_queues_create(struct be_adapter *adapter)
2405{
2406 struct be_queue_info *q, *cq;
5fb379ee 2407
8788fdc2 2408 cq = &adapter->mcc_obj.cq;
5fb379ee 2409 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2410 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2411 goto err;
2412
10ef9ab4
SP
2413 /* Use the default EQ for MCC completions */
2414 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2415 goto mcc_cq_free;
2416
8788fdc2 2417 q = &adapter->mcc_obj.q;
5fb379ee
SP
2418 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2419 goto mcc_cq_destroy;
2420
8788fdc2 2421 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2422 goto mcc_q_free;
2423
2424 return 0;
2425
2426mcc_q_free:
2427 be_queue_free(adapter, q);
2428mcc_cq_destroy:
8788fdc2 2429 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2430mcc_cq_free:
2431 be_queue_free(adapter, cq);
2432err:
2433 return -1;
2434}
2435
6b7c5b94
SP
2436static void be_tx_queues_destroy(struct be_adapter *adapter)
2437{
2438 struct be_queue_info *q;
3c8def97
SP
2439 struct be_tx_obj *txo;
2440 u8 i;
6b7c5b94 2441
3c8def97
SP
2442 for_all_tx_queues(adapter, txo, i) {
2443 q = &txo->q;
2444 if (q->created)
2445 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2446 be_queue_free(adapter, q);
6b7c5b94 2447
3c8def97
SP
2448 q = &txo->cq;
2449 if (q->created)
2450 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2451 be_queue_free(adapter, q);
2452 }
6b7c5b94
SP
2453}
2454
7707133c 2455static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2456{
73f394e6 2457 struct be_queue_info *cq;
3c8def97 2458 struct be_tx_obj *txo;
73f394e6 2459 struct be_eq_obj *eqo;
92bf14ab 2460 int status, i;
6b7c5b94 2461
92bf14ab 2462 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2463
10ef9ab4
SP
2464 for_all_tx_queues(adapter, txo, i) {
2465 cq = &txo->cq;
2466 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2467 sizeof(struct be_eth_tx_compl));
2468 if (status)
2469 return status;
3c8def97 2470
827da44c
JS
2471 u64_stats_init(&txo->stats.sync);
2472 u64_stats_init(&txo->stats.sync_compl);
2473
10ef9ab4
SP
2474 /* If num_evt_qs is less than num_tx_qs, then more than
 2475		 * one txq shares an eq
2476 */
73f394e6
SP
2477 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2478 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2479 if (status)
2480 return status;
6b7c5b94 2481
10ef9ab4
SP
2482 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2483 sizeof(struct be_eth_wrb));
2484 if (status)
2485 return status;
6b7c5b94 2486
94d73aaa 2487 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2488 if (status)
2489 return status;
73f394e6
SP
2490
2491 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2492 eqo->idx);
3c8def97 2493 }
6b7c5b94 2494
d379142b
SP
2495 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2496 adapter->num_tx_qs);
10ef9ab4 2497 return 0;
6b7c5b94
SP
2498}
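/* Example (illustrative): with num_evt_qs = 4 and be_max_txqs() allowing
 * 8, num_tx_qs = min(4, 8) = 4, so txq i pairs 1:1 with EQ i;
 * netif_set_xps_queue() then steers transmits from the CPUs in EQ i's
 * affinity mask onto txq i.
 */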
2499
10ef9ab4 2500static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2501{
2502 struct be_queue_info *q;
3abcdeda
SP
2503 struct be_rx_obj *rxo;
2504 int i;
2505
2506 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2507 q = &rxo->cq;
2508 if (q->created)
2509 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2510 be_queue_free(adapter, q);
ac6a0c4a
SP
2511 }
2512}
2513
10ef9ab4 2514static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2515{
10ef9ab4 2516 struct be_queue_info *eq, *cq;
3abcdeda
SP
2517 struct be_rx_obj *rxo;
2518 int rc, i;
6b7c5b94 2519
92bf14ab 2520 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2521 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2522
71bb8bd0
VV
 2523	/* We'll use RSS only if at least 2 RSS rings are supported. */
2524 if (adapter->num_rss_qs <= 1)
2525 adapter->num_rss_qs = 0;
2526
2527 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2528
2529 /* When the interface is not capable of RSS rings (and there is no
2530 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2531 */
71bb8bd0
VV
2532 if (adapter->num_rx_qs == 0)
2533 adapter->num_rx_qs = 1;
92bf14ab 2534
6b7c5b94 2535 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2536 for_all_rx_queues(adapter, rxo, i) {
2537 rxo->adapter = adapter;
3abcdeda
SP
2538 cq = &rxo->cq;
2539 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2540 sizeof(struct be_eth_rx_compl));
3abcdeda 2541 if (rc)
10ef9ab4 2542 return rc;
3abcdeda 2543
827da44c 2544 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2545 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2546 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2547 if (rc)
10ef9ab4 2548 return rc;
3abcdeda 2549 }
6b7c5b94 2550
d379142b 2551 dev_info(&adapter->pdev->dev,
71bb8bd0 2552 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2553 return 0;
b628bde2
SP
2554}
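/* Example (illustrative): with 4 EQs, num_rss_qs = 4 RSS rings are set
 * up, plus one default RXQ when need_def_rxq is set, i.e. num_rx_qs = 5.
 * With only a single EQ, RSS is disabled (num_rss_qs = 0) and one plain
 * RXQ is created.
 */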
2555
6b7c5b94
SP
2556static irqreturn_t be_intx(int irq, void *dev)
2557{
e49cc34f
SP
2558 struct be_eq_obj *eqo = dev;
2559 struct be_adapter *adapter = eqo->adapter;
2560 int num_evts = 0;
6b7c5b94 2561
d0b9cec3
SP
2562 /* IRQ is not expected when NAPI is scheduled as the EQ
2563 * will not be armed.
2564 * But, this can happen on Lancer INTx where it takes
 2565	 * a while to de-assert INTx or in BE2 where occasionally
2566 * an interrupt may be raised even when EQ is unarmed.
2567 * If NAPI is already scheduled, then counting & notifying
2568 * events will orphan them.
e49cc34f 2569 */
d0b9cec3 2570 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2571 num_evts = events_get(eqo);
d0b9cec3
SP
2572 __napi_schedule(&eqo->napi);
2573 if (num_evts)
2574 eqo->spurious_intr = 0;
2575 }
2576 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2577
d0b9cec3
SP
 2578	/* Return IRQ_HANDLED only for the first spurious intr
2579 * after a valid intr to stop the kernel from branding
2580 * this irq as a bad one!
e49cc34f 2581 */
d0b9cec3
SP
2582 if (num_evts || eqo->spurious_intr++ == 0)
2583 return IRQ_HANDLED;
2584 else
2585 return IRQ_NONE;
6b7c5b94
SP
2586}
2587
10ef9ab4 2588static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2589{
10ef9ab4 2590 struct be_eq_obj *eqo = dev;
6b7c5b94 2591
0b545a62
SP
2592 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2593 napi_schedule(&eqo->napi);
6b7c5b94
SP
2594 return IRQ_HANDLED;
2595}
2596
2e588f84 2597static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2598{
e38b1706 2599 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2600}
2601
10ef9ab4 2602static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2603 int budget, int polling)
6b7c5b94 2604{
3abcdeda
SP
2605 struct be_adapter *adapter = rxo->adapter;
2606 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2607 struct be_rx_compl_info *rxcp;
6b7c5b94 2608 u32 work_done;
c30d7266 2609 u32 frags_consumed = 0;
6b7c5b94
SP
2610
2611 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2612 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2613 if (!rxcp)
2614 break;
2615
12004ae9
SP
2616 /* Is it a flush compl that has no data */
2617 if (unlikely(rxcp->num_rcvd == 0))
2618 goto loop_continue;
2619
2620 /* Discard compl with partial DMA Lancer B0 */
2621 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2622 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2623 goto loop_continue;
2624 }
2625
2626 /* On BE drop pkts that arrive due to imperfect filtering in
 2627		 * promiscuous mode on some SKUs
2628 */
2629 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2630 !lancer_chip(adapter))) {
10ef9ab4 2631 be_rx_compl_discard(rxo, rxcp);
12004ae9 2632 goto loop_continue;
64642811 2633 }
009dd872 2634
6384a4d0
SP
2635 /* Don't do gro when we're busy_polling */
2636 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2637 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2638 else
6384a4d0
SP
2639 be_rx_compl_process(rxo, napi, rxcp);
2640
12004ae9 2641loop_continue:
c30d7266 2642 frags_consumed += rxcp->num_rcvd;
2e588f84 2643 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2644 }
2645
10ef9ab4
SP
2646 if (work_done) {
2647 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2648
6384a4d0
SP
2649 /* When an rx-obj gets into post_starved state, just
2650 * let be_worker do the posting.
2651 */
2652 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2653 !rxo->rx_post_starved)
c30d7266
AK
2654 be_post_rx_frags(rxo, GFP_ATOMIC,
2655 max_t(u32, MAX_RX_POST,
2656 frags_consumed));
6b7c5b94 2657 }
10ef9ab4 2658
6b7c5b94
SP
2659 return work_done;
2660}
2661
152ffe5b 2662static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2663{
2664 switch (status) {
2665 case BE_TX_COMP_HDR_PARSE_ERR:
2666 tx_stats(txo)->tx_hdr_parse_err++;
2667 break;
2668 case BE_TX_COMP_NDMA_ERR:
2669 tx_stats(txo)->tx_dma_err++;
2670 break;
2671 case BE_TX_COMP_ACL_ERR:
2672 tx_stats(txo)->tx_spoof_check_err++;
2673 break;
2674 }
2675}
2676
152ffe5b 2677static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2678{
2679 switch (status) {
2680 case LANCER_TX_COMP_LSO_ERR:
2681 tx_stats(txo)->tx_tso_err++;
2682 break;
2683 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2684 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2685 tx_stats(txo)->tx_spoof_check_err++;
2686 break;
2687 case LANCER_TX_COMP_QINQ_ERR:
2688 tx_stats(txo)->tx_qinq_err++;
2689 break;
2690 case LANCER_TX_COMP_PARITY_ERR:
2691 tx_stats(txo)->tx_internal_parity_err++;
2692 break;
2693 case LANCER_TX_COMP_DMA_ERR:
2694 tx_stats(txo)->tx_dma_err++;
2695 break;
2696 }
2697}
2698
c8f64615
SP
2699static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2700 int idx)
6b7c5b94 2701{
c8f64615 2702 int num_wrbs = 0, work_done = 0;
152ffe5b 2703 struct be_tx_compl_info *txcp;
c8f64615 2704
152ffe5b
SB
2705 while ((txcp = be_tx_compl_get(txo))) {
2706 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2707 work_done++;
3c8def97 2708
152ffe5b 2709 if (txcp->status) {
512bb8a2 2710 if (lancer_chip(adapter))
152ffe5b 2711 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2712 else
152ffe5b 2713 be_update_tx_err(txo, txcp->status);
512bb8a2 2714 }
10ef9ab4 2715 }
6b7c5b94 2716
10ef9ab4
SP
2717 if (work_done) {
2718 be_cq_notify(adapter, txo->cq.id, true, work_done);
2719 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2720
10ef9ab4
SP
2721 /* As Tx wrbs have been freed up, wake up netdev queue
2722 * if it was stopped due to lack of tx wrbs. */
2723 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2724 be_can_txq_wake(txo)) {
10ef9ab4 2725 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2726 }
10ef9ab4
SP
2727
2728 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2729 tx_stats(txo)->tx_compl += work_done;
2730 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2731 }
10ef9ab4 2732}
6b7c5b94 2733
f7062ee5
SP
2734#ifdef CONFIG_NET_RX_BUSY_POLL
2735static inline bool be_lock_napi(struct be_eq_obj *eqo)
2736{
2737 bool status = true;
2738
2739 spin_lock(&eqo->lock); /* BH is already disabled */
2740 if (eqo->state & BE_EQ_LOCKED) {
2741 WARN_ON(eqo->state & BE_EQ_NAPI);
2742 eqo->state |= BE_EQ_NAPI_YIELD;
2743 status = false;
2744 } else {
2745 eqo->state = BE_EQ_NAPI;
2746 }
2747 spin_unlock(&eqo->lock);
2748 return status;
2749}
2750
2751static inline void be_unlock_napi(struct be_eq_obj *eqo)
2752{
2753 spin_lock(&eqo->lock); /* BH is already disabled */
2754
2755 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2756 eqo->state = BE_EQ_IDLE;
2757
2758 spin_unlock(&eqo->lock);
2759}
2760
2761static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2762{
2763 bool status = true;
2764
2765 spin_lock_bh(&eqo->lock);
2766 if (eqo->state & BE_EQ_LOCKED) {
2767 eqo->state |= BE_EQ_POLL_YIELD;
2768 status = false;
2769 } else {
2770 eqo->state |= BE_EQ_POLL;
2771 }
2772 spin_unlock_bh(&eqo->lock);
2773 return status;
2774}
2775
2776static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2777{
2778 spin_lock_bh(&eqo->lock);
2779
2780 WARN_ON(eqo->state & (BE_EQ_NAPI));
2781 eqo->state = BE_EQ_IDLE;
2782
2783 spin_unlock_bh(&eqo->lock);
2784}
2785
2786static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2787{
2788 spin_lock_init(&eqo->lock);
2789 eqo->state = BE_EQ_IDLE;
2790}
2791
2792static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2793{
2794 local_bh_disable();
2795
2796 /* It's enough to just acquire napi lock on the eqo to stop
 2797	 * be_busy_poll() from processing any queues.
2798 */
2799 while (!be_lock_napi(eqo))
2800 mdelay(1);
2801
2802 local_bh_enable();
2803}
2804
2805#else /* CONFIG_NET_RX_BUSY_POLL */
2806
2807static inline bool be_lock_napi(struct be_eq_obj *eqo)
2808{
2809 return true;
2810}
2811
2812static inline void be_unlock_napi(struct be_eq_obj *eqo)
2813{
2814}
2815
2816static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2817{
2818 return false;
2819}
2820
2821static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2822{
2823}
2824
2825static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2826{
2827}
2828
2829static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2830{
2831}
2832#endif /* CONFIG_NET_RX_BUSY_POLL */
2833
68d7bdcb 2834int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2835{
2836 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2837 struct be_adapter *adapter = eqo->adapter;
0b545a62 2838 int max_work = 0, work, i, num_evts;
6384a4d0 2839 struct be_rx_obj *rxo;
a4906ea0 2840 struct be_tx_obj *txo;
f31e50a8 2841
0b545a62
SP
2842 num_evts = events_get(eqo);
2843
a4906ea0
SP
2844 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2845 be_process_tx(adapter, txo, i);
f31e50a8 2846
6384a4d0
SP
2847 if (be_lock_napi(eqo)) {
2848 /* This loop will iterate twice for EQ0 in which
2849 * completions of the last RXQ (default one) are also processed
2850 * For other EQs the loop iterates only once
2851 */
2852 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2853 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2854 max_work = max(work, max_work);
2855 }
2856 be_unlock_napi(eqo);
2857 } else {
2858 max_work = budget;
10ef9ab4 2859 }
6b7c5b94 2860
10ef9ab4
SP
2861 if (is_mcc_eqo(eqo))
2862 be_process_mcc(adapter);
93c86700 2863
10ef9ab4
SP
2864 if (max_work < budget) {
2865 napi_complete(napi);
0b545a62 2866 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2867 } else {
2868 /* As we'll continue in polling mode, count and clear events */
0b545a62 2869 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2870 }
10ef9ab4 2871 return max_work;
6b7c5b94
SP
2872}
2873
6384a4d0
SP
2874#ifdef CONFIG_NET_RX_BUSY_POLL
2875static int be_busy_poll(struct napi_struct *napi)
2876{
2877 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2878 struct be_adapter *adapter = eqo->adapter;
2879 struct be_rx_obj *rxo;
2880 int i, work = 0;
2881
2882 if (!be_lock_busy_poll(eqo))
2883 return LL_FLUSH_BUSY;
2884
2885 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2886 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2887 if (work)
2888 break;
2889 }
2890
2891 be_unlock_busy_poll(eqo);
2892 return work;
2893}
2894#endif
2895
f67ef7ba 2896void be_detect_error(struct be_adapter *adapter)
7c185276 2897{
e1cfb67a
PR
2898 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2899 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2900 u32 i;
eb0eecc1
SK
2901 bool error_detected = false;
2902 struct device *dev = &adapter->pdev->dev;
2903 struct net_device *netdev = adapter->netdev;
7c185276 2904
d23e946c 2905 if (be_hw_error(adapter))
72f02485
SP
2906 return;
2907
e1cfb67a
PR
2908 if (lancer_chip(adapter)) {
2909 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2910 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2911 sliport_err1 = ioread32(adapter->db +
748b539a 2912 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2913 sliport_err2 = ioread32(adapter->db +
748b539a 2914 SLIPORT_ERROR2_OFFSET);
eb0eecc1 2915 adapter->hw_error = true;
d0e1b319 2916 error_detected = true;
eb0eecc1
SK
 2917			/* Do not log error messages if it's a FW reset */
2918 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2919 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2920 dev_info(dev, "Firmware update in progress\n");
2921 } else {
eb0eecc1
SK
2922 dev_err(dev, "Error detected in the card\n");
2923 dev_err(dev, "ERR: sliport status 0x%x\n",
2924 sliport_status);
2925 dev_err(dev, "ERR: sliport error1 0x%x\n",
2926 sliport_err1);
2927 dev_err(dev, "ERR: sliport error2 0x%x\n",
2928 sliport_err2);
2929 }
e1cfb67a
PR
2930 }
2931 } else {
25848c90
SR
2932 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2933 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2934 ue_lo_mask = ioread32(adapter->pcicfg +
2935 PCICFG_UE_STATUS_LOW_MASK);
2936 ue_hi_mask = ioread32(adapter->pcicfg +
2937 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 2938
f67ef7ba
PR
2939 ue_lo = (ue_lo & ~ue_lo_mask);
2940 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2941
eb0eecc1
SK
2942 /* On certain platforms BE hardware can indicate spurious UEs.
2943 * Allow HW to stop working completely in case of a real UE.
2944 * Hence not setting the hw_error for UE detection.
2945 */
f67ef7ba 2946
eb0eecc1
SK
2947 if (ue_lo || ue_hi) {
2948 error_detected = true;
2949 dev_err(dev,
2950 "Unrecoverable Error detected in the adapter");
2951 dev_err(dev, "Please reboot server to recover");
2952 if (skyhawk_chip(adapter))
2953 adapter->hw_error = true;
2954 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2955 if (ue_lo & 1)
2956 dev_err(dev, "UE: %s bit set\n",
2957 ue_status_low_desc[i]);
2958 }
2959 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2960 if (ue_hi & 1)
2961 dev_err(dev, "UE: %s bit set\n",
2962 ue_status_hi_desc[i]);
2963 }
7c185276
AK
2964 }
2965 }
eb0eecc1
SK
2966 if (error_detected)
2967 netif_carrier_off(netdev);
7c185276
AK
2968}
2969
8d56ff11
SP
2970static void be_msix_disable(struct be_adapter *adapter)
2971{
ac6a0c4a 2972 if (msix_enabled(adapter)) {
8d56ff11 2973 pci_disable_msix(adapter->pdev);
ac6a0c4a 2974 adapter->num_msix_vec = 0;
68d7bdcb 2975 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2976 }
2977}
2978
c2bba3df 2979static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2980{
7dc4c064 2981 int i, num_vec;
d379142b 2982 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2983
92bf14ab
SP
2984 /* If RoCE is supported, program the max number of NIC vectors that
2985 * may be configured via set-channels, along with vectors needed for
 2986	 * RoCE. Else, just program the number we'll use initially.
2987 */
2988 if (be_roce_supported(adapter))
2989 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2990 2 * num_online_cpus());
2991 else
2992 num_vec = adapter->cfg_num_qs;
3abcdeda 2993
ac6a0c4a 2994 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2995 adapter->msix_entries[i].entry = i;
2996
7dc4c064
AG
2997 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2998 MIN_MSIX_VECTORS, num_vec);
2999 if (num_vec < 0)
3000 goto fail;
92bf14ab 3001
92bf14ab
SP
3002 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3003 adapter->num_msix_roce_vec = num_vec / 2;
3004 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3005 adapter->num_msix_roce_vec);
3006 }
3007
3008 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3009
3010 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3011 adapter->num_msix_vec);
c2bba3df 3012 return 0;
7dc4c064
AG
3013
3014fail:
3015 dev_warn(dev, "MSIx enable failed\n");
3016
3017 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3018 if (!be_physfn(adapter))
3019 return num_vec;
3020 return 0;
6b7c5b94
SP
3021}
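/* Worked example (illustrative): on an 8-CPU host with RoCE supported
 * and be_max_eqs() = 8, num_vec = min(2 * 8, 2 * 8) = 16 vectors are
 * requested. If all 16 are granted, half (8) are reserved for RoCE and
 * the other 8 drive the NIC event queues.
 */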
3022
fe6d2a38 3023static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3024 struct be_eq_obj *eqo)
b628bde2 3025{
f2f781a7 3026 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3027}
6b7c5b94 3028
b628bde2
SP
3029static int be_msix_register(struct be_adapter *adapter)
3030{
10ef9ab4
SP
3031 struct net_device *netdev = adapter->netdev;
3032 struct be_eq_obj *eqo;
3033 int status, i, vec;
6b7c5b94 3034
10ef9ab4
SP
3035 for_all_evt_queues(adapter, eqo, i) {
3036 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3037 vec = be_msix_vec_get(adapter, eqo);
3038 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3039 if (status)
3040 goto err_msix;
d658d98a
PR
3041
3042 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3043 }
b628bde2 3044
6b7c5b94 3045 return 0;
3abcdeda 3046err_msix:
10ef9ab4
SP
3047 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3048 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3049 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3050 status);
ac6a0c4a 3051 be_msix_disable(adapter);
6b7c5b94
SP
3052 return status;
3053}
3054
3055static int be_irq_register(struct be_adapter *adapter)
3056{
3057 struct net_device *netdev = adapter->netdev;
3058 int status;
3059
ac6a0c4a 3060 if (msix_enabled(adapter)) {
6b7c5b94
SP
3061 status = be_msix_register(adapter);
3062 if (status == 0)
3063 goto done;
ba343c77
SB
3064 /* INTx is not supported for VF */
3065 if (!be_physfn(adapter))
3066 return status;
6b7c5b94
SP
3067 }
3068
e49cc34f 3069 /* INTx: only the first EQ is used */
6b7c5b94
SP
3070 netdev->irq = adapter->pdev->irq;
3071 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3072 &adapter->eq_obj[0]);
6b7c5b94
SP
3073 if (status) {
3074 dev_err(&adapter->pdev->dev,
3075 "INTx request IRQ failed - err %d\n", status);
3076 return status;
3077 }
3078done:
3079 adapter->isr_registered = true;
3080 return 0;
3081}
3082
3083static void be_irq_unregister(struct be_adapter *adapter)
3084{
3085 struct net_device *netdev = adapter->netdev;
10ef9ab4 3086 struct be_eq_obj *eqo;
d658d98a 3087 int i, vec;
6b7c5b94
SP
3088
3089 if (!adapter->isr_registered)
3090 return;
3091
3092 /* INTx */
ac6a0c4a 3093 if (!msix_enabled(adapter)) {
e49cc34f 3094 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3095 goto done;
3096 }
3097
3098 /* MSIx */
d658d98a
PR
3099 for_all_evt_queues(adapter, eqo, i) {
3100 vec = be_msix_vec_get(adapter, eqo);
3101 irq_set_affinity_hint(vec, NULL);
3102 free_irq(vec, eqo);
3103 }
3abcdeda 3104
6b7c5b94
SP
3105done:
3106 adapter->isr_registered = false;
6b7c5b94
SP
3107}
3108
10ef9ab4 3109static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3110{
3111 struct be_queue_info *q;
3112 struct be_rx_obj *rxo;
3113 int i;
3114
3115 for_all_rx_queues(adapter, rxo, i) {
3116 q = &rxo->q;
3117 if (q->created) {
3118 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3119 be_rx_cq_clean(rxo);
482c9e79 3120 }
10ef9ab4 3121 be_queue_free(adapter, q);
482c9e79
SP
3122 }
3123}
3124
889cd4b2
SP
3125static int be_close(struct net_device *netdev)
3126{
3127 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3128 struct be_eq_obj *eqo;
3129 int i;
889cd4b2 3130
e1ad8e33
KA
3131 /* This protection is needed as be_close() may be called even when the
3132 * adapter is in cleared state (after eeh perm failure)
3133 */
3134 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3135 return 0;
3136
045508a8
PP
3137 be_roce_dev_close(adapter);
3138
dff345c5
IV
3139 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3140 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3141 napi_disable(&eqo->napi);
6384a4d0
SP
3142 be_disable_busy_poll(eqo);
3143 }
71237b6f 3144 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3145 }
a323d9bf
SP
3146
3147 be_async_mcc_disable(adapter);
3148
3149 /* Wait for all pending tx completions to arrive so that
3150 * all tx skbs are freed.
3151 */
fba87559 3152 netif_tx_disable(netdev);
6e1f9975 3153 be_tx_compl_clean(adapter);
a323d9bf
SP
3154
3155 be_rx_qs_destroy(adapter);
f66b7cfd 3156 be_clear_uc_list(adapter);
d11a347d 3157
a323d9bf 3158 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3159 if (msix_enabled(adapter))
3160 synchronize_irq(be_msix_vec_get(adapter, eqo));
3161 else
3162 synchronize_irq(netdev->irq);
3163 be_eq_clean(eqo);
63fcb27f
PR
3164 }
3165
889cd4b2
SP
3166 be_irq_unregister(adapter);
3167
482c9e79
SP
3168 return 0;
3169}
3170
10ef9ab4 3171static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3172{
1dcf7b1c
ED
3173 struct rss_info *rss = &adapter->rss_info;
3174 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3175 struct be_rx_obj *rxo;
e9008ee9 3176 int rc, i, j;
482c9e79
SP
3177
3178 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3179 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3180 sizeof(struct be_eth_rx_d));
3181 if (rc)
3182 return rc;
3183 }
3184
71bb8bd0
VV
3185 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3186 rxo = default_rxo(adapter);
3187 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3188 rx_frag_size, adapter->if_handle,
3189 false, &rxo->rss_id);
3190 if (rc)
3191 return rc;
3192 }
10ef9ab4
SP
3193
3194 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3195 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3196 rx_frag_size, adapter->if_handle,
3197 true, &rxo->rss_id);
482c9e79
SP
3198 if (rc)
3199 return rc;
3200 }
3201
3202 if (be_multi_rxq(adapter)) {
71bb8bd0 3203 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3204 for_all_rss_queues(adapter, rxo, i) {
e2557877 3205 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3206 break;
e2557877
VD
3207 rss->rsstable[j + i] = rxo->rss_id;
3208 rss->rss_queue[j + i] = i;
e9008ee9
PR
3209 }
3210 }
e2557877
VD
3211 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3212 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3213
3214 if (!BEx_chip(adapter))
e2557877
VD
3215 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3216 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3217 } else {
3218 /* Disable RSS, if only default RX Q is created */
e2557877 3219 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3220 }
594ad54a 3221
1dcf7b1c 3222 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3223 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3224 128, rss_key);
da1388d6 3225 if (rc) {
e2557877 3226 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3227 return rc;
482c9e79
SP
3228 }
3229
1dcf7b1c 3230 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3231
482c9e79 3232 /* First time posting */
10ef9ab4 3233 for_all_rx_queues(adapter, rxo, i)
c30d7266 3234 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3235 return 0;
3236}
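/* Example (illustrative): with num_rss_qs = 4, the 128-entry RSS
 * indirection table is filled so that entry k maps to the rss_id of ring
 * k % 4, i.e. the four rings repeat 32 times across the table. The hash
 * key is taken from the kernel's boot-time random RSS key via
 * netdev_rss_key_fill().
 */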
3237
6b7c5b94
SP
3238static int be_open(struct net_device *netdev)
3239{
3240 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3241 struct be_eq_obj *eqo;
3abcdeda 3242 struct be_rx_obj *rxo;
10ef9ab4 3243 struct be_tx_obj *txo;
b236916a 3244 u8 link_status;
3abcdeda 3245 int status, i;
5fb379ee 3246
10ef9ab4 3247 status = be_rx_qs_create(adapter);
482c9e79
SP
3248 if (status)
3249 goto err;
3250
c2bba3df
SK
3251 status = be_irq_register(adapter);
3252 if (status)
3253 goto err;
5fb379ee 3254
10ef9ab4 3255 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3256 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3257
10ef9ab4
SP
3258 for_all_tx_queues(adapter, txo, i)
3259 be_cq_notify(adapter, txo->cq.id, true, 0);
3260
7a1e9b20
SP
3261 be_async_mcc_enable(adapter);
3262
10ef9ab4
SP
3263 for_all_evt_queues(adapter, eqo, i) {
3264 napi_enable(&eqo->napi);
6384a4d0 3265 be_enable_busy_poll(eqo);
4cad9f3b 3266 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3267 }
04d3d624 3268 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3269
323ff71e 3270 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3271 if (!status)
3272 be_link_status_update(adapter, link_status);
3273
fba87559 3274 netif_tx_start_all_queues(netdev);
045508a8 3275 be_roce_dev_open(adapter);
c9c47142 3276
c5abe7c0 3277#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3278 if (skyhawk_chip(adapter))
3279 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3280#endif
3281
889cd4b2
SP
3282 return 0;
3283err:
3284 be_close(adapter->netdev);
3285 return -EIO;
5fb379ee
SP
3286}
3287
3288static int be_setup_wol(struct be_adapter *adapter, bool enable)
3289{
3290 struct be_dma_mem cmd;
3291 int status = 0;
3292 u8 mac[ETH_ALEN];
3293
c7bf7169 3294 eth_zero_addr(mac);
3295
3296 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3297 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3298 GFP_KERNEL);
ddf1169f 3299 if (!cmd.va)
6b568689 3300 return -ENOMEM;
3301
3302 if (enable) {
3303 status = pci_write_config_dword(adapter->pdev,
3304 PCICFG_PM_CONTROL_OFFSET,
3305 PCICFG_PM_CONTROL_MASK);
3306 if (status) {
3307 dev_err(&adapter->pdev->dev,
2381a55c 3308 "Could not enable Wake-on-LAN\n");
3309 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3310 cmd.dma);
3311 return status;
3312 }
3313 status = be_cmd_enable_magic_wol(adapter,
3314 adapter->netdev->dev_addr,
3315 &cmd);
3316 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3317 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3318 } else {
3319 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3320 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3321 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3322 }
3323
2b7bcebf 3324 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3325 return status;
3326}
3327
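/* Editor's note: be_setup_wol() is normally reached from the driver's
 * suspend/resume path once the user arms magic-packet wake-up, e.g. via
 * ethtool (illustrative invocation, not taken from this file):
 *
 *   ethtool -s eth0 wol g
 *
 * With enable == true the current dev_addr is programmed as the magic
 * pattern and D3hot/D3cold PCI wake is turned on; with enable == false a
 * zeroed MAC is programmed and the wake flags are cleared again.
 */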
3328static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3329{
3330 u32 addr;
3331
3332 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3333
3334 mac[5] = (u8)(addr & 0xFF);
3335 mac[4] = (u8)((addr >> 8) & 0xFF);
3336 mac[3] = (u8)((addr >> 16) & 0xFF);
3337 /* Use the OUI from the current MAC address */
3338 memcpy(mac, adapter->netdev->dev_addr, 3);
3339}
3340
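/* Editor's sketch (not part of the driver): a self-contained user-space
 * reproduction of the seed-MAC layout produced above, using a fixed fake
 * hash value in place of jhash(). Bytes 0-2 keep the PF's OUI; bytes 3-5
 * carry the low 24 bits of the hash.
 */
#include <stdio.h>
#include <string.h>

static void demo_vf_mac(const unsigned char *pf_mac, unsigned int hash,
			unsigned char *mac)
{
	mac[5] = (unsigned char)(hash & 0xFF);
	mac[4] = (unsigned char)((hash >> 8) & 0xFF);
	mac[3] = (unsigned char)((hash >> 16) & 0xFF);
	memcpy(mac, pf_mac, 3);		/* keep the OUI */
}

int main(void)
{
	unsigned char pf[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	unsigned char vf[6];

	demo_vf_mac(pf, 0xa1b2c3d4, vf);	/* 0xa1b2c3d4: stand-in hash */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       vf[0], vf[1], vf[2], vf[3], vf[4], vf[5]);
	/* prints 00:90:fa:b2:c3:d4; be_vf_eth_addr_config() below then
	 * increments mac[5] once per VF
	 */
	return 0;
}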
3341/*
3342 * Generate a seed MAC address from the PF MAC Address using jhash.
 3343 * MAC addresses for VFs are assigned incrementally starting from the seed.
3344 * These addresses are programmed in the ASIC by the PF and the VF driver
3345 * queries for the MAC address during its probe.
3346 */
4c876616 3347static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3348{
f9449ab7 3349 u32 vf;
3abcdeda 3350 int status = 0;
6d87f5c3 3351 u8 mac[ETH_ALEN];
11ac75ed 3352 struct be_vf_cfg *vf_cfg;
3353
3354 be_vf_eth_addr_generate(adapter, mac);
3355
11ac75ed 3356 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3357 if (BEx_chip(adapter))
590c391d 3358 status = be_cmd_pmac_add(adapter, mac,
3359 vf_cfg->if_handle,
3360 &vf_cfg->pmac_id, vf + 1);
3361 else
3362 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3363 vf + 1);
590c391d 3364
3365 if (status)
3366 dev_err(&adapter->pdev->dev,
 3367 "MAC address assignment failed for VF %d\n",
3368 vf);
6d87f5c3 3369 else
11ac75ed 3370 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3371
3372 mac[5] += 1;
3373 }
3374 return status;
3375}
3376
3377static int be_vfs_mac_query(struct be_adapter *adapter)
3378{
3379 int status, vf;
3380 u8 mac[ETH_ALEN];
3381 struct be_vf_cfg *vf_cfg;
3382
3383 for_all_vfs(adapter, vf_cfg, vf) {
3384 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3385 mac, vf_cfg->if_handle,
3386 false, vf+1);
3387 if (status)
3388 return status;
3389 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3390 }
3391 return 0;
3392}
3393
f9449ab7 3394static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3395{
11ac75ed 3396 struct be_vf_cfg *vf_cfg;
3397 u32 vf;
3398
257a3feb 3399 if (pci_vfs_assigned(adapter->pdev)) {
3400 dev_warn(&adapter->pdev->dev,
3401 "VFs are assigned to VMs: not disabling VFs\n");
3402 goto done;
3403 }
3404
3405 pci_disable_sriov(adapter->pdev);
3406
11ac75ed 3407 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3408 if (BEx_chip(adapter))
3409 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3410 vf_cfg->pmac_id, vf + 1);
3411 else
3412 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3413 vf + 1);
f9449ab7 3414
3415 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3416 }
3417done:
3418 kfree(adapter->vf_cfg);
3419 adapter->num_vfs = 0;
f174c7ec 3420 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3421}
3422
3423static void be_clear_queues(struct be_adapter *adapter)
3424{
3425 be_mcc_queues_destroy(adapter);
3426 be_rx_cqs_destroy(adapter);
3427 be_tx_queues_destroy(adapter);
3428 be_evt_queues_destroy(adapter);
3429}
3430
68d7bdcb 3431static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3432{
3433 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3434 cancel_delayed_work_sync(&adapter->work);
3435 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3436 }
3437}
3438
3439static void be_cancel_err_detection(struct be_adapter *adapter)
3440{
3441 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3442 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3443 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3444 }
3445}
3446
b05004ad 3447static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3448{
b05004ad 3449 if (adapter->pmac_id) {
3450 be_cmd_pmac_del(adapter, adapter->if_handle,
3451 adapter->pmac_id[0], 0);
3452 kfree(adapter->pmac_id);
3453 adapter->pmac_id = NULL;
3454 }
3455}
3456
c5abe7c0 3457#ifdef CONFIG_BE2NET_VXLAN
3458static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3459{
3460 struct net_device *netdev = adapter->netdev;
3461
3462 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3463 be_cmd_manage_iface(adapter, adapter->if_handle,
3464 OP_CONVERT_TUNNEL_TO_NORMAL);
3465
3466 if (adapter->vxlan_port)
3467 be_cmd_set_vxlan_port(adapter, 0);
3468
3469 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3470 adapter->vxlan_port = 0;
3471
3472 netdev->hw_enc_features = 0;
3473 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3474 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3475}
c5abe7c0 3476#endif
c9c47142 3477
3478static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3479{
3480 struct be_resources res = adapter->pool_res;
3481 u16 num_vf_qs = 1;
3482
 3483 /* Distribute the queue resources equally among the PF and its VFs.
3484 * Do not distribute queue resources in multi-channel configuration.
3485 */
3486 if (num_vfs && !be_is_mc(adapter)) {
 3487 /* If the number of VFs requested is at least 8 less than the
 3488 * max supported, reserve 8 queue pairs for the PF and divide
 3489 * the remaining resources evenly among the VFs
3490 */
3491 if (num_vfs < (be_max_vfs(adapter) - 8))
3492 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3493 else
3494 num_vf_qs = res.max_rss_qs / num_vfs;
3495
3496 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
 3497 * interfaces per port. Provide RSS on VFs only if the number
 3498 * of VFs requested is less than the MAX_RSS_IFACES limit.
3499 */
3500 if (num_vfs >= MAX_RSS_IFACES)
3501 num_vf_qs = 1;
3502 }
3503 return num_vf_qs;
3504}
3505
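/* Editor's note: a worked example for be_calculate_vf_qs(), assuming a
 * single-channel PF with res.max_rss_qs = 32 and be_max_vfs() = 64
 * (illustrative numbers). For num_vfs = 8: since 8 < (64 - 8), the PF
 * keeps 8 queue pairs and num_vf_qs = (32 - 8) / 8 = 3 per VF. Once
 * num_vfs reaches MAX_RSS_IFACES, every VF drops back to a single queue
 * pair with no RSS, as the comment above explains.
 */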
3506static int be_clear(struct be_adapter *adapter)
3507{
3508 struct pci_dev *pdev = adapter->pdev;
3509 u16 num_vf_qs;
3510
68d7bdcb 3511 be_cancel_worker(adapter);
191eb756 3512
11ac75ed 3513 if (sriov_enabled(adapter))
3514 be_vf_clear(adapter);
3515
3516 /* Re-configure FW to distribute resources evenly across max-supported
3517 * number of VFs, only when VFs are not already enabled.
3518 */
3519 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3520 !pci_vfs_assigned(pdev)) {
3521 num_vf_qs = be_calculate_vf_qs(adapter,
3522 pci_sriov_get_totalvfs(pdev));
bec84e6b 3523 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3524 pci_sriov_get_totalvfs(pdev),
3525 num_vf_qs);
3526 }
bec84e6b 3527
c5abe7c0 3528#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3529 be_disable_vxlan_offloads(adapter);
c5abe7c0 3530#endif
2d17f403 3531 /* delete the primary mac along with the uc-mac list */
b05004ad 3532 be_mac_clear(adapter);
fbc13f01 3533
f9449ab7 3534 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3535
7707133c 3536 be_clear_queues(adapter);
a54769f5 3537
10ef9ab4 3538 be_msix_disable(adapter);
e1ad8e33 3539 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3540 return 0;
3541}
3542
3543static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3544 u32 cap_flags, u32 vf)
3545{
3546 u32 en_flags;
3547
3548 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3549 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3550 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3551
3552 en_flags &= cap_flags;
3553
435452aa 3554 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3555}
3556
4c876616 3557static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3558{
92bf14ab 3559 struct be_resources res = {0};
4c876616 3560 struct be_vf_cfg *vf_cfg;
3561 u32 cap_flags, vf;
3562 int status;
abb93951 3563
0700d816 3564 /* If a FW profile exists, then cap_flags are updated */
3565 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3566 BE_IF_FLAGS_MULTICAST;
abb93951 3567
4c876616 3568 for_all_vfs(adapter, vf_cfg, vf) {
3569 if (!BE3_chip(adapter)) {
3570 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3571 RESOURCE_LIMITS,
92bf14ab 3572 vf + 1);
435452aa 3573 if (!status) {
92bf14ab 3574 cap_flags = res.if_cap_flags;
3575 /* Prevent VFs from enabling VLAN promiscuous
3576 * mode
3577 */
3578 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3579 }
92bf14ab 3580 }
4c876616 3581
3582 status = be_if_create(adapter, &vf_cfg->if_handle,
3583 cap_flags, vf + 1);
4c876616 3584 if (status)
0700d816 3585 return status;
4c876616 3586 }
3587
3588 return 0;
3589}
3590
39f1d94d 3591static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3592{
11ac75ed 3593 struct be_vf_cfg *vf_cfg;
3594 int vf;
3595
3596 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3597 GFP_KERNEL);
3598 if (!adapter->vf_cfg)
3599 return -ENOMEM;
3600
3601 for_all_vfs(adapter, vf_cfg, vf) {
3602 vf_cfg->if_handle = -1;
3603 vf_cfg->pmac_id = -1;
30128031 3604 }
39f1d94d 3605 return 0;
3606}
3607
3608static int be_vf_setup(struct be_adapter *adapter)
3609{
c502224e 3610 struct device *dev = &adapter->pdev->dev;
11ac75ed 3611 struct be_vf_cfg *vf_cfg;
4c876616 3612 int status, old_vfs, vf;
39f1d94d 3613
257a3feb 3614 old_vfs = pci_num_vf(adapter->pdev);
3615
3616 status = be_vf_setup_init(adapter);
3617 if (status)
3618 goto err;
30128031 3619
3620 if (old_vfs) {
3621 for_all_vfs(adapter, vf_cfg, vf) {
3622 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3623 if (status)
3624 goto err;
3625 }
f9449ab7 3626
3627 status = be_vfs_mac_query(adapter);
3628 if (status)
3629 goto err;
3630 } else {
3631 status = be_vfs_if_create(adapter);
3632 if (status)
3633 goto err;
3634
3635 status = be_vf_eth_addr_config(adapter);
3636 if (status)
3637 goto err;
3638 }
f9449ab7 3639
11ac75ed 3640 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3641 /* Allow VFs to program MAC/VLAN filters */
3642 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3643 vf + 1);
3644 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3645 status = be_cmd_set_fn_privileges(adapter,
435452aa 3646 vf_cfg->privileges |
3647 BE_PRIV_FILTMGMT,
3648 vf + 1);
3649 if (!status) {
3650 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3651 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3652 vf);
435452aa 3653 }
3654 }
3655
3656 /* Allow full available bandwidth */
3657 if (!old_vfs)
3658 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3659
bdce2ad7 3660 if (!old_vfs) {
0599863d 3661 be_cmd_enable_vf(adapter, vf + 1);
3662 be_cmd_set_logical_link_config(adapter,
3663 IFLA_VF_LINK_STATE_AUTO,
3664 vf+1);
3665 }
f9449ab7 3666 }
3667
3668 if (!old_vfs) {
3669 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3670 if (status) {
3671 dev_err(dev, "SRIOV enable failed\n");
3672 adapter->num_vfs = 0;
3673 goto err;
3674 }
3675 }
3676
3677 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3678 return 0;
3679err:
3680 dev_err(dev, "VF setup failed\n");
3681 be_vf_clear(adapter);
3682 return status;
3683}
3684
3685/* Converting function_mode bits on BE3 to SH mc_type enums */
3686
3687static u8 be_convert_mc_type(u32 function_mode)
3688{
66064dbc 3689 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3690 return vNIC1;
66064dbc 3691 else if (function_mode & QNQ_MODE)
3692 return FLEX10;
3693 else if (function_mode & VNIC_MODE)
3694 return vNIC2;
3695 else if (function_mode & UMC_ENABLED)
3696 return UMC;
3697 else
3698 return MC_NONE;
3699}
3700
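/* Editor's note: the decode above, in table form:
 *
 *   VNIC_MODE  QNQ_MODE  UMC_ENABLED  ->  mc_type
 *       1         1          x        ->  vNIC1
 *       0         1          x        ->  FLEX10
 *       1         0          x        ->  vNIC2
 *       0         0          1        ->  UMC
 *       0         0          0        ->  MC_NONE
 */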
 3701/* On BE2/BE3, the FW does not suggest the supported limits */
3702static void BEx_get_resources(struct be_adapter *adapter,
3703 struct be_resources *res)
3704{
bec84e6b 3705 bool use_sriov = adapter->num_vfs ? 1 : 0;
3706
3707 if (be_physfn(adapter))
3708 res->max_uc_mac = BE_UC_PMAC_COUNT;
3709 else
3710 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3711
3712 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3713
3714 if (be_is_mc(adapter)) {
3715 /* Assuming that there are 4 channels per port,
3716 * when multi-channel is enabled
3717 */
3718 if (be_is_qnq_mode(adapter))
3719 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3720 else
3721 /* In a non-qnq multichannel mode, the pvid
3722 * takes up one vlan entry
3723 */
3724 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3725 } else {
92bf14ab 3726 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3727 }
3728
3729 res->max_mcast_mac = BE_MAX_MC;
3730
3731 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3732 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3733 * *only* if it is RSS-capable.
3734 */
3735 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3736 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3737 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3738 res->max_tx_qs = 1;
3739 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3740 struct be_resources super_nic_res = {0};
3741
3742 /* On a SuperNIC profile, the driver needs to use the
3743 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3744 */
3745 be_cmd_get_profile_config(adapter, &super_nic_res,
3746 RESOURCE_LIMITS, 0);
3747 /* Some old versions of BE3 FW don't report max_tx_qs value */
3748 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3749 } else {
92bf14ab 3750 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3751 }
3752
3753 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3754 !use_sriov && be_physfn(adapter))
3755 res->max_rss_qs = (adapter->be3_native) ?
3756 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3757 res->max_rx_qs = res->max_rss_qs + 1;
3758
e3dc867c 3759 if (be_physfn(adapter))
d3518e21 3760 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
3761 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3762 else
3763 res->max_evt_qs = 1;
3764
3765 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 3766 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
3767 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3768 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3769}
3770
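/* Editor's note: a worked example of the VLAN budget computed above,
 * assuming BE_NUM_VLANS_SUPPORTED = 64 (illustrative; check be.h for the
 * real value):
 *   qnq multi-channel:      max_vlans = 64 / 8     = 8
 *   non-qnq multi-channel:  max_vlans = 64 / 4 - 1 = 15 (pvid uses one)
 *   single channel:         max_vlans = 64
 */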
3771static void be_setup_init(struct be_adapter *adapter)
3772{
3773 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3774 adapter->phy.link_speed = -1;
3775 adapter->if_handle = -1;
3776 adapter->be3_native = false;
f66b7cfd 3777 adapter->if_flags = 0;
3778 if (be_physfn(adapter))
3779 adapter->cmd_privileges = MAX_PRIVILEGES;
3780 else
3781 adapter->cmd_privileges = MIN_PRIVILEGES;
3782}
3783
3784static int be_get_sriov_config(struct be_adapter *adapter)
3785{
bec84e6b 3786 struct be_resources res = {0};
d3d18312 3787 int max_vfs, old_vfs;
bec84e6b 3788
f2858738 3789 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 3790
ace40aff 3791 /* Some old versions of BE3 FW don't report max_vfs value */
3792 if (BE3_chip(adapter) && !res.max_vfs) {
3793 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3794 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3795 }
3796
d3d18312 3797 adapter->pool_res = res;
bec84e6b 3798
 3799 /* If the VFs were not disabled during a previous unload of the driver,
3800 * then we cannot rely on the PF POOL limits for the TotalVFs value.
3801 * Instead use the TotalVFs value stored in the pci-dev struct.
3802 */
3803 old_vfs = pci_num_vf(adapter->pdev);
3804 if (old_vfs) {
3805 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3806 old_vfs);
3807
3808 adapter->pool_res.max_vfs =
3809 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 3810 adapter->num_vfs = old_vfs;
3811 }
3812
3813 return 0;
3814}
3815
3816static void be_alloc_sriov_res(struct be_adapter *adapter)
3817{
3818 int old_vfs = pci_num_vf(adapter->pdev);
3819 u16 num_vf_qs;
3820 int status;
3821
3822 be_get_sriov_config(adapter);
3823
3824 if (!old_vfs)
3825 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3826
 3827 /* When the HW is in an SRIOV-capable configuration, the PF-pool
 3828 * resources are given to the PF during driver load if there are no
3829 * old VFs. This facility is not available in BE3 FW.
3830 * Also, this is done by FW in Lancer chip.
3831 */
3832 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3833 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3834 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3835 num_vf_qs);
3836 if (status)
3837 dev_err(&adapter->pdev->dev,
3838 "Failed to optimize SRIOV resources\n");
3839 }
3840}
3841
92bf14ab 3842static int be_get_resources(struct be_adapter *adapter)
abb93951 3843{
3844 struct device *dev = &adapter->pdev->dev;
3845 struct be_resources res = {0};
3846 int status;
abb93951 3847
3848 if (BEx_chip(adapter)) {
3849 BEx_get_resources(adapter, &res);
3850 adapter->res = res;
3851 }
3852
 3853 /* For Lancer, SH, etc. read the per-function resource limits from FW.
 3854 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 3855 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3856 */
3857 if (!BEx_chip(adapter)) {
3858 status = be_cmd_get_func_config(adapter, &res);
3859 if (status)
3860 return status;
abb93951 3861
 3862 /* If a default RXQ must be created, we'll use up one RSS Q */
3863 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3864 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3865 res.max_rss_qs -= 1;
3866
 3867 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3868 if (be_roce_supported(adapter))
3869 res.max_evt_qs /= 2;
3870 adapter->res = res;
abb93951 3871 }
4c876616 3872
3873 /* If FW supports RSS default queue, then skip creating non-RSS
3874 * queue for non-IP traffic.
3875 */
3876 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3877 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3878
3879 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3880 be_max_txqs(adapter), be_max_rxqs(adapter),
3881 be_max_rss(adapter), be_max_eqs(adapter),
3882 be_max_vfs(adapter));
3883 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3884 be_max_uc(adapter), be_max_mc(adapter),
3885 be_max_vlans(adapter));
3886
3887 /* Sanitize cfg_num_qs based on HW and platform limits */
3888 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
3889 be_max_qs(adapter));
92bf14ab 3890 return 0;
3891}
3892
3893static int be_get_config(struct be_adapter *adapter)
3894{
6b085ba9 3895 int status, level;
542963b7 3896 u16 profile_id;
3897
3898 status = be_cmd_get_cntl_attributes(adapter);
3899 if (status)
3900 return status;
39f1d94d 3901
e97e3cda 3902 status = be_cmd_query_fw_cfg(adapter);
abb93951 3903 if (status)
92bf14ab 3904 return status;
abb93951 3905
3906 if (BEx_chip(adapter)) {
3907 level = be_cmd_get_fw_log_level(adapter);
3908 adapter->msg_enable =
3909 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3910 }
3911
3912 be_cmd_get_acpi_wol_cap(adapter);
3913
3914 be_cmd_query_port_name(adapter);
3915
3916 if (be_physfn(adapter)) {
3917 status = be_cmd_get_active_profile(adapter, &profile_id);
3918 if (!status)
3919 dev_info(&adapter->pdev->dev,
3920 "Using profile 0x%x\n", profile_id);
962bcb75 3921 }
bec84e6b 3922
3923 status = be_get_resources(adapter);
3924 if (status)
3925 return status;
abb93951 3926
3927 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3928 sizeof(*adapter->pmac_id), GFP_KERNEL);
3929 if (!adapter->pmac_id)
3930 return -ENOMEM;
abb93951 3931
92bf14ab 3932 return 0;
3933}
3934
3935static int be_mac_setup(struct be_adapter *adapter)
3936{
3937 u8 mac[ETH_ALEN];
3938 int status;
3939
3940 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3941 status = be_cmd_get_perm_mac(adapter, mac);
3942 if (status)
3943 return status;
3944
3945 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3946 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3947 } else {
3948 /* Maybe the HW was reset; dev_addr must be re-programmed */
3949 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3950 }
3951
3952 /* For BE3-R VFs, the PF programs the initial MAC address */
3953 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3954 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3955 &adapter->pmac_id[0], 0);
3956 return 0;
3957}
3958
3959static void be_schedule_worker(struct be_adapter *adapter)
3960{
3961 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3962 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3963}
3964
3965static void be_schedule_err_detection(struct be_adapter *adapter)
3966{
3967 schedule_delayed_work(&adapter->be_err_detection_work,
3968 msecs_to_jiffies(1000));
3969 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
3970}
3971
7707133c 3972static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3973{
68d7bdcb 3974 struct net_device *netdev = adapter->netdev;
10ef9ab4 3975 int status;
ba343c77 3976
7707133c 3977 status = be_evt_queues_create(adapter);
3978 if (status)
3979 goto err;
73d540f2 3980
7707133c 3981 status = be_tx_qs_create(adapter);
3982 if (status)
3983 goto err;
10ef9ab4 3984
7707133c 3985 status = be_rx_cqs_create(adapter);
10ef9ab4 3986 if (status)
a54769f5 3987 goto err;
6b7c5b94 3988
7707133c 3989 status = be_mcc_queues_create(adapter);
3990 if (status)
3991 goto err;
3992
3993 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3994 if (status)
3995 goto err;
3996
3997 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3998 if (status)
3999 goto err;
4000
4001 return 0;
4002err:
4003 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4004 return status;
4005}
4006
4007int be_update_queues(struct be_adapter *adapter)
4008{
4009 struct net_device *netdev = adapter->netdev;
4010 int status;
4011
4012 if (netif_running(netdev))
4013 be_close(netdev);
4014
4015 be_cancel_worker(adapter);
4016
4017 /* If any vectors have been shared with RoCE we cannot re-program
4018 * the MSIx table.
4019 */
4020 if (!adapter->num_msix_roce_vec)
4021 be_msix_disable(adapter);
4022
4023 be_clear_queues(adapter);
4024
4025 if (!msix_enabled(adapter)) {
4026 status = be_msix_enable(adapter);
4027 if (status)
4028 return status;
4029 }
4030
4031 status = be_setup_queues(adapter);
4032 if (status)
4033 return status;
4034
4035 be_schedule_worker(adapter);
4036
4037 if (netif_running(netdev))
4038 status = be_open(netdev);
4039
4040 return status;
4041}
4042
4043static inline int fw_major_num(const char *fw_ver)
4044{
4045 int fw_major = 0, i;
4046
4047 i = sscanf(fw_ver, "%d.", &fw_major);
4048 if (i != 1)
4049 return 0;
4050
4051 return fw_major;
4052}
4053
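/* Editor's note: fw_major_num() in action; sscanf() stops at the first
 * '.', so only the leading integer is parsed:
 *   fw_major_num("10.2.299.0") -> 10
 *   fw_major_num("4.9.134.0")  -> 4
 *   fw_major_num("bogus")      -> 0  (nothing matched)
 * be_setup() below uses this to warn when a BE2 card runs pre-4.0 firmware.
 */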
 4054/* If any VFs are already enabled, don't FLR the PF */
4055static bool be_reset_required(struct be_adapter *adapter)
4056{
4057 return pci_num_vf(adapter->pdev) ? false : true;
4058}
4059
4060/* Wait for the FW to be ready and perform the required initialization */
4061static int be_func_init(struct be_adapter *adapter)
4062{
4063 int status;
4064
4065 status = be_fw_wait_ready(adapter);
4066 if (status)
4067 return status;
4068
4069 if (be_reset_required(adapter)) {
4070 status = be_cmd_reset_function(adapter);
4071 if (status)
4072 return status;
4073
4074 /* Wait for interrupts to quiesce after an FLR */
4075 msleep(100);
4076
4077 /* We can clear all errors when function reset succeeds */
4078 be_clear_all_error(adapter);
4079 }
4080
4081 /* Tell FW we're ready to fire cmds */
4082 status = be_cmd_fw_init(adapter);
4083 if (status)
4084 return status;
4085
4086 /* Allow interrupts for other ULPs running on NIC function */
4087 be_intr_set(adapter, true);
4088
4089 return 0;
4090}
4091
4092static int be_setup(struct be_adapter *adapter)
4093{
4094 struct device *dev = &adapter->pdev->dev;
4095 int status;
4096
4097 status = be_func_init(adapter);
4098 if (status)
4099 return status;
4100
4101 be_setup_init(adapter);
4102
4103 if (!lancer_chip(adapter))
4104 be_cmd_req_native_mode(adapter);
4105
4106 if (!BE2_chip(adapter) && be_physfn(adapter))
4107 be_alloc_sriov_res(adapter);
4108
7707133c 4109 status = be_get_config(adapter);
10ef9ab4 4110 if (status)
a54769f5 4111 goto err;
6b7c5b94 4112
7707133c 4113 status = be_msix_enable(adapter);
10ef9ab4 4114 if (status)
a54769f5 4115 goto err;
6b7c5b94 4116
4117 status = be_if_create(adapter, &adapter->if_handle,
4118 be_if_cap_flags(adapter), 0);
7707133c 4119 if (status)
a54769f5 4120 goto err;
6b7c5b94 4121
4122 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4123 rtnl_lock();
7707133c 4124 status = be_setup_queues(adapter);
68d7bdcb 4125 rtnl_unlock();
95046b92 4126 if (status)
4127 goto err;
4128
7707133c 4129 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4130
4131 status = be_mac_setup(adapter);
4132 if (status)
4133 goto err;
4134
e97e3cda 4135 be_cmd_get_fw_ver(adapter);
acbafeb1 4136 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4137
e9e2a904 4138 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4139 dev_err(dev, "Firmware on card is old (%s); IRQs may not work",
4140 adapter->fw_ver);
4141 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4142 }
4143
1d1e9a46 4144 if (adapter->vlans_added)
10329df8 4145 be_vid_config(adapter);
7ab8b0b4 4146
a54769f5 4147 be_set_rx_mode(adapter->netdev);
5fb379ee 4148
4149 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4150 adapter->rx_fc);
4151 if (status)
4152 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4153 &adapter->rx_fc);
590c391d 4154
4155 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4156 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4157
4158 if (be_physfn(adapter))
4159 be_cmd_set_logical_link_config(adapter,
4160 IFLA_VF_LINK_STATE_AUTO, 0);
4161
4162 if (adapter->num_vfs)
4163 be_vf_setup(adapter);
f9449ab7 4164
4165 status = be_cmd_get_phy_info(adapter);
4166 if (!status && be_pause_supported(adapter))
4167 adapter->phy.fc_autoneg = 1;
4168
68d7bdcb 4169 be_schedule_worker(adapter);
e1ad8e33 4170 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4171 return 0;
4172err:
4173 be_clear(adapter);
4174 return status;
4175}
6b7c5b94 4176
4177#ifdef CONFIG_NET_POLL_CONTROLLER
4178static void be_netpoll(struct net_device *netdev)
4179{
4180 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4181 struct be_eq_obj *eqo;
4182 int i;
4183
4184 for_all_evt_queues(adapter, eqo, i) {
4185 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4186 napi_schedule(&eqo->napi);
4187 }
4188}
4189#endif
4190
96c9b2e4 4191static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
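/* Editor's note: the two 16-byte halves above, including the NUL padding
 * that follows "*** SE FLAS" inside its 16-byte slot, concatenate into
 * the 32-byte cookie that marks a flash_section_info header;
 * get_fsec_info() below memcmp()s all sizeof(flash_cookie) == 32 bytes
 * while scanning the UFI image in 32-byte steps.
 */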
fa9a6fed 4192
4193static bool phy_flashing_required(struct be_adapter *adapter)
4194{
e02cfd96 4195 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4196 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4197}
4198
4199static bool is_comp_in_ufi(struct be_adapter *adapter,
4200 struct flash_section_info *fsec, int type)
4201{
4202 int i = 0, img_type = 0;
4203 struct flash_section_info_g2 *fsec_g2 = NULL;
4204
ca34fe38 4205 if (BE2_chip(adapter))
4206 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4207
4208 for (i = 0; i < MAX_FLASH_COMP; i++) {
4209 if (fsec_g2)
4210 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4211 else
4212 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4213
4214 if (img_type == type)
4215 return true;
4216 }
4217 return false;
4218
4219}
4220
4188e7df 4221static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4222 int header_size,
4223 const struct firmware *fw)
4224{
4225 struct flash_section_info *fsec = NULL;
4226 const u8 *p = fw->data;
4227
4228 p += header_size;
4229 while (p < (fw->data + fw->size)) {
4230 fsec = (struct flash_section_info *)p;
4231 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4232 return fsec;
4233 p += 32;
4234 }
4235 return NULL;
4236}
4237
4238static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4239 u32 img_offset, u32 img_size, int hdr_size,
4240 u16 img_optype, bool *crc_match)
4241{
4242 u32 crc_offset;
4243 int status;
4244 u8 crc[4];
4245
4246 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4247 img_size - 4);
4248 if (status)
4249 return status;
4250
4251 crc_offset = hdr_size + img_offset + img_size - 4;
4252
 4253 /* Skip flashing if the CRC of the flashed region matches */
4254 if (!memcmp(crc, p + crc_offset, 4))
4255 *crc_match = true;
4256 else
4257 *crc_match = false;
4258
4259 return status;
4260}
4261
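/* Editor's note: the layout assumed by be_check_flash_crc() above. For an
 * image of img_size bytes located img_offset into the flash sections:
 *
 *   fw->data: [ headers (hdr_size) | ... | image payload | CRC (4 bytes) ]
 *                                        ^ img_offset      ^ crc_offset
 *
 * The matching 4 bytes already on the card are fetched with
 * be_cmd_get_flash_crc() and compared, so an unchanged section is skipped.
 */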
773a2d7c 4262static int be_flash(struct be_adapter *adapter, const u8 *img,
4263 struct be_dma_mem *flash_cmd, int optype, int img_size,
4264 u32 img_offset)
773a2d7c 4265{
70a7b525 4266 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4267 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4268 int status;
773a2d7c 4269
4270 while (total_bytes) {
4271 num_bytes = min_t(u32, 32*1024, total_bytes);
4272
4273 total_bytes -= num_bytes;
4274
4275 if (!total_bytes) {
4276 if (optype == OPTYPE_PHY_FW)
4277 flash_op = FLASHROM_OPER_PHY_FLASH;
4278 else
4279 flash_op = FLASHROM_OPER_FLASH;
4280 } else {
4281 if (optype == OPTYPE_PHY_FW)
4282 flash_op = FLASHROM_OPER_PHY_SAVE;
4283 else
4284 flash_op = FLASHROM_OPER_SAVE;
4285 }
4286
be716446 4287 memcpy(req->data_buf, img, num_bytes);
4288 img += num_bytes;
4289 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4290 flash_op, img_offset +
4291 bytes_sent, num_bytes);
4c60005f 4292 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4293 optype == OPTYPE_PHY_FW)
4294 break;
4295 else if (status)
773a2d7c 4296 return status;
4297
4298 bytes_sent += num_bytes;
4299 }
4300 return 0;
4301}
4302
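/* Editor's note: a worked example of the chunking in be_flash() above,
 * for a 100 KB image with optype != OPTYPE_PHY_FW:
 *   chunk 1: 32 KB, FLASHROM_OPER_SAVE   (total_bytes still non-zero)
 *   chunk 2: 32 KB, FLASHROM_OPER_SAVE
 *   chunk 3: 32 KB, FLASHROM_OPER_SAVE
 *   chunk 4:  4 KB, FLASHROM_OPER_FLASH  (final chunk commits the image)
 * bytes_sent tracks the running offset passed to be_cmd_write_flashrom().
 */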
0ad3157e 4303/* For BE2, BE3 and BE3-R */
ca34fe38 4304static int be_flash_BEx(struct be_adapter *adapter,
4305 const struct firmware *fw,
4306 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4307{
c165541e 4308 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4309 struct device *dev = &adapter->pdev->dev;
c165541e 4310 struct flash_section_info *fsec = NULL;
4311 int status, i, filehdr_size, num_comp;
4312 const struct flash_comp *pflashcomp;
4313 bool crc_match;
4314 const u8 *p;
4315
4316 struct flash_comp gen3_flash_types[] = {
4317 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4318 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4319 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4320 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4321 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4322 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4323 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4324 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4325 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4326 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4327 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4328 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4329 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4330 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4331 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4332 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4333 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4334 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4335 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4336 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4337 };
4338
4339 struct flash_comp gen2_flash_types[] = {
4340 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4341 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4342 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4343 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4344 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4345 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4346 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4347 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4348 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4349 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4350 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4351 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4352 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4353 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4354 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4355 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4356 };
4357
ca34fe38 4358 if (BE3_chip(adapter)) {
4359 pflashcomp = gen3_flash_types;
4360 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4361 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4362 } else {
4363 pflashcomp = gen2_flash_types;
4364 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4365 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4366 img_hdrs_size = 0;
84517482 4367 }
ca34fe38 4368
 4369 /* Get flash section info */
4370 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4371 if (!fsec) {
96c9b2e4 4372 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4373 return -1;
4374 }
9fe96934 4375 for (i = 0; i < num_comp; i++) {
c165541e 4376 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4377 continue;
4378
4379 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4380 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4381 continue;
4382
4383 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4384 !phy_flashing_required(adapter))
306f1348 4385 continue;
c165541e 4386
773a2d7c 4387 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4388 status = be_check_flash_crc(adapter, fw->data,
4389 pflashcomp[i].offset,
4390 pflashcomp[i].size,
4391 filehdr_size +
4392 img_hdrs_size,
4393 OPTYPE_REDBOOT, &crc_match);
4394 if (status) {
4395 dev_err(dev,
4396 "Could not get CRC for 0x%x region\n",
4397 pflashcomp[i].optype);
4398 continue;
4399 }
4400
4401 if (crc_match)
773a2d7c
PR
4402 continue;
4403 }
c165541e 4404
4405 p = fw->data + filehdr_size + pflashcomp[i].offset +
4406 img_hdrs_size;
4407 if (p + pflashcomp[i].size > fw->data + fw->size)
4408 return -1;
4409
4410 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4411 pflashcomp[i].size, 0);
773a2d7c 4412 if (status) {
96c9b2e4 4413 dev_err(dev, "Flashing section type 0x%x failed\n",
4414 pflashcomp[i].img_type);
4415 return status;
84517482 4416 }
84517482 4417 }
4418 return 0;
4419}
4420
4421static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4422{
4423 u32 img_type = le32_to_cpu(fsec_entry.type);
4424 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4425
4426 if (img_optype != 0xFFFF)
4427 return img_optype;
4428
4429 switch (img_type) {
4430 case IMAGE_FIRMWARE_iSCSI:
4431 img_optype = OPTYPE_ISCSI_ACTIVE;
4432 break;
4433 case IMAGE_BOOT_CODE:
4434 img_optype = OPTYPE_REDBOOT;
4435 break;
4436 case IMAGE_OPTION_ROM_ISCSI:
4437 img_optype = OPTYPE_BIOS;
4438 break;
4439 case IMAGE_OPTION_ROM_PXE:
4440 img_optype = OPTYPE_PXE_BIOS;
4441 break;
4442 case IMAGE_OPTION_ROM_FCoE:
4443 img_optype = OPTYPE_FCOE_BIOS;
4444 break;
4445 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4446 img_optype = OPTYPE_ISCSI_BACKUP;
4447 break;
4448 case IMAGE_NCSI:
4449 img_optype = OPTYPE_NCSI_FW;
4450 break;
4451 case IMAGE_FLASHISM_JUMPVECTOR:
4452 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4453 break;
4454 case IMAGE_FIRMWARE_PHY:
4455 img_optype = OPTYPE_SH_PHY_FW;
4456 break;
4457 case IMAGE_REDBOOT_DIR:
4458 img_optype = OPTYPE_REDBOOT_DIR;
4459 break;
4460 case IMAGE_REDBOOT_CONFIG:
4461 img_optype = OPTYPE_REDBOOT_CONFIG;
4462 break;
4463 case IMAGE_UFI_DIR:
4464 img_optype = OPTYPE_UFI_DIR;
4465 break;
4466 default:
4467 break;
4468 }
4469
4470 return img_optype;
4471}
4472
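/* Editor's note: newer UFI files carry an explicit per-section optype
 * (fsec_entry.optype != 0xFFFF), which be_get_img_optype() returns
 * untouched; the switch above only back-fills optypes for legacy images,
 * e.g. IMAGE_FIRMWARE_iSCSI -> OPTYPE_ISCSI_ACTIVE.
 */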
773a2d7c 4473static int be_flash_skyhawk(struct be_adapter *adapter,
4474 const struct firmware *fw,
4475 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4476{
773a2d7c 4477 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4478 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4479 struct device *dev = &adapter->pdev->dev;
773a2d7c 4480 struct flash_section_info *fsec = NULL;
96c9b2e4 4481 u32 img_offset, img_size, img_type;
70a7b525 4482 u16 img_optype, flash_optype;
96c9b2e4 4483 int status, i, filehdr_size;
96c9b2e4 4484 const u8 *p;
4485
4486 filehdr_size = sizeof(struct flash_file_hdr_g3);
4487 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4488 if (!fsec) {
96c9b2e4 4489 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4490 return -EINVAL;
4491 }
4492
70a7b525 4493retry_flash:
4494 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4495 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4496 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4497 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4498 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4499 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4500
96c9b2e4 4501 if (img_optype == 0xFFFF)
773a2d7c 4502 continue;
4503
4504 if (flash_offset_support)
4505 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4506 else
4507 flash_optype = img_optype;
4508
4509 /* Don't bother verifying CRC if an old FW image is being
4510 * flashed
4511 */
4512 if (old_fw_img)
4513 goto flash;
4514
4515 status = be_check_flash_crc(adapter, fw->data, img_offset,
4516 img_size, filehdr_size +
70a7b525 4517 img_hdrs_size, flash_optype,
96c9b2e4 4518 &crc_match);
4519 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4520 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4521 /* The current FW image on the card does not support
4522 * OFFSET based flashing. Retry using older mechanism
4523 * of OPTYPE based flashing
4524 */
4525 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4526 flash_offset_support = false;
4527 goto retry_flash;
4528 }
4529
4530 /* The current FW image on the card does not recognize
4531 * the new FLASH op_type. The FW download is partially
4532 * complete. Reboot the server now to enable FW image
4533 * to recognize the new FLASH op_type. To complete the
4534 * remaining process, download the same FW again after
4535 * the reboot.
4536 */
4537 dev_err(dev, "Flash incomplete. Reset the server\n");
4538 dev_err(dev, "Download FW image again after reset\n");
4539 return -EAGAIN;
4540 } else if (status) {
4541 dev_err(dev, "Could not get CRC for 0x%x region\n",
4542 img_optype);
4543 return -EFAULT;
4544 }
4545
4546 if (crc_match)
4547 continue;
773a2d7c 4548
4549flash:
4550 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4551 if (p + img_size > fw->data + fw->size)
4552 return -1;
4553
4554 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4555 img_offset);
4556
4557 /* The current FW image on the card does not support OFFSET
4558 * based flashing. Retry using older mechanism of OPTYPE based
4559 * flashing
4560 */
4561 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4562 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4563 flash_offset_support = false;
4564 goto retry_flash;
4565 }
4566
4567 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4568 * UFI_DIR region
4569 */
4570 if (old_fw_img &&
4571 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4572 (img_optype == OPTYPE_UFI_DIR &&
4573 base_status(status) == MCC_STATUS_FAILED))) {
4574 continue;
4575 } else if (status) {
4576 dev_err(dev, "Flashing section type 0x%x failed\n",
4577 img_type);
4578 return -EFAULT;
4579 }
4580 }
4581 return 0;
4582}
4583
485bf569 4584static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4585 const struct firmware *fw)
84517482 4586{
4587#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4588#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4589 struct device *dev = &adapter->pdev->dev;
84517482 4590 struct be_dma_mem flash_cmd;
4591 const u8 *data_ptr = NULL;
4592 u8 *dest_image_ptr = NULL;
4593 size_t image_size = 0;
4594 u32 chunk_size = 0;
4595 u32 data_written = 0;
4596 u32 offset = 0;
4597 int status = 0;
4598 u8 add_status = 0;
f67ef7ba 4599 u8 change_status;
84517482 4600
485bf569 4601 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4602 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4603 return -EINVAL;
4604 }
4605
4606 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4607 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4608 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4609 &flash_cmd.dma, GFP_KERNEL);
4610 if (!flash_cmd.va)
4611 return -ENOMEM;
84517482 4612
4613 dest_image_ptr = flash_cmd.va +
4614 sizeof(struct lancer_cmd_req_write_object);
4615 image_size = fw->size;
4616 data_ptr = fw->data;
4617
4618 while (image_size) {
4619 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4620
4621 /* Copy the image chunk content. */
4622 memcpy(dest_image_ptr, data_ptr, chunk_size);
4623
4624 status = lancer_cmd_write_object(adapter, &flash_cmd,
4625 chunk_size, offset,
4626 LANCER_FW_DOWNLOAD_LOCATION,
4627 &data_written, &change_status,
4628 &add_status);
4629 if (status)
4630 break;
4631
4632 offset += data_written;
4633 data_ptr += data_written;
4634 image_size -= data_written;
4635 }
4636
4637 if (!status) {
4638 /* Commit the FW written */
4639 status = lancer_cmd_write_object(adapter, &flash_cmd,
4640 0, offset,
4641 LANCER_FW_DOWNLOAD_LOCATION,
4642 &data_written, &change_status,
4643 &add_status);
4644 }
4645
bb864e07 4646 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4647 if (status) {
bb864e07 4648 dev_err(dev, "Firmware load error\n");
3fb8cb80 4649 return be_cmd_status(status);
4650 }
4651
4652 dev_info(dev, "Firmware flashed successfully\n");
4653
f67ef7ba 4654 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4655 dev_info(dev, "Resetting adapter to activate new FW\n");
4656 status = lancer_physdev_ctrl(adapter,
4657 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4658 if (status) {
4659 dev_err(dev, "Adapter busy, could not reset FW\n");
4660 dev_err(dev, "Reboot server to activate new FW\n");
4661 }
4662 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4663 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4664 }
4665
4666 return 0;
4667}
4668
4669#define BE2_UFI 2
4670#define BE3_UFI 3
4671#define BE3R_UFI 10
4672#define SH_UFI 4
81a9e226 4673#define SH_P2_UFI 11
5d3acd0d 4674
ca34fe38 4675static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4676 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4677{
4678 if (!fhdr) {
 4679 dev_err(&adapter->pdev->dev, "Invalid FW UFI file\n");
4680 return -1;
4681 }
773a2d7c 4682
4683 /* First letter of the build version is used to identify
4684 * which chip this image file is meant for.
4685 */
4686 switch (fhdr->build[0]) {
4687 case BLD_STR_UFI_TYPE_SH:
4688 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4689 SH_UFI;
4690 case BLD_STR_UFI_TYPE_BE3:
4691 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4692 BE3_UFI;
4693 case BLD_STR_UFI_TYPE_BE2:
4694 return BE2_UFI;
4695 default:
4696 return -1;
4697 }
4698}
773a2d7c 4699
4700/* Check if the flash image file is compatible with the adapter that
4701 * is being flashed.
4702 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4703 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
4704 */
4705static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4706 struct flash_file_hdr_g3 *fhdr)
4707{
4708 int ufi_type = be_get_ufi_type(adapter, fhdr);
4709
4710 switch (ufi_type) {
81a9e226 4711 case SH_P2_UFI:
5d3acd0d 4712 return skyhawk_chip(adapter);
4713 case SH_UFI:
4714 return (skyhawk_chip(adapter) &&
4715 adapter->asic_rev < ASIC_REV_P2);
4716 case BE3R_UFI:
4717 return BE3_chip(adapter);
4718 case BE3_UFI:
4719 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4720 case BE2_UFI:
4721 return BE2_chip(adapter);
4722 default:
4723 return false;
4724 }
4725}
4726
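/* Editor's note: compatibility examples for the check above:
 *   BE3 asic-rev B0 adapter  + BE3_UFI   -> false (needs BE3R_UFI)
 *   BE3 asic-rev B0 adapter  + BE3R_UFI  -> true
 *   Skyhawk asic-rev P2      + SH_UFI    -> false (needs SH_P2_UFI)
 *   BE2 adapter              + BE2_UFI   -> true
 */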
 4727static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4728{
5d3acd0d 4729 struct device *dev = &adapter->pdev->dev;
485bf569 4730 struct flash_file_hdr_g3 *fhdr3;
4731 struct image_hdr *img_hdr_ptr;
4732 int status = 0, i, num_imgs;
485bf569 4733 struct be_dma_mem flash_cmd;
84517482 4734
4735 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4736 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4737 dev_err(dev, "Flash image is not compatible with adapter\n");
4738 return -EINVAL;
4739 }
4740
4741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4742 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4743 GFP_KERNEL);
4744 if (!flash_cmd.va)
4745 return -ENOMEM;
773a2d7c 4746
4747 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4748 for (i = 0; i < num_imgs; i++) {
4749 img_hdr_ptr = (struct image_hdr *)(fw->data +
4750 (sizeof(struct flash_file_hdr_g3) +
4751 i * sizeof(struct image_hdr)));
4752 if (!BE2_chip(adapter) &&
4753 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4754 continue;
84517482 4755
4756 if (skyhawk_chip(adapter))
4757 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4758 num_imgs);
4759 else
4760 status = be_flash_BEx(adapter, fw, &flash_cmd,
4761 num_imgs);
4762 }
4763
4764 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4765 if (!status)
4766 dev_info(dev, "Firmware flashed successfully\n");
84517482 4767
4768 return status;
4769}
4770
4771int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4772{
4773 const struct firmware *fw;
4774 int status;
4775
4776 if (!netif_running(adapter->netdev)) {
4777 dev_err(&adapter->pdev->dev,
4778 "Firmware load not allowed (interface is down)\n");
940a3fcd 4779 return -ENETDOWN;
4780 }
4781
4782 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4783 if (status)
4784 goto fw_exit;
4785
4786 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4787
4788 if (lancer_chip(adapter))
4789 status = lancer_fw_download(adapter, fw);
4790 else
4791 status = be_fw_download(adapter, fw);
4792
eeb65ced 4793 if (!status)
e97e3cda 4794 be_cmd_get_fw_ver(adapter);
eeb65ced 4795
4796fw_exit:
4797 release_firmware(fw);
4798 return status;
4799}
4800
4801static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4802 u16 flags)
4803{
4804 struct be_adapter *adapter = netdev_priv(dev);
4805 struct nlattr *attr, *br_spec;
4806 int rem;
4807 int status = 0;
4808 u16 mode = 0;
4809
4810 if (!sriov_enabled(adapter))
4811 return -EOPNOTSUPP;
4812
4813 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4814 if (!br_spec)
4815 return -EINVAL;
4816
4817 nla_for_each_nested(attr, br_spec, rem) {
4818 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4819 continue;
4820
4821 if (nla_len(attr) < sizeof(mode))
4822 return -EINVAL;
4823
4824 mode = nla_get_u16(attr);
4825 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4826 return -EINVAL;
4827
4828 status = be_cmd_set_hsw_config(adapter, 0, 0,
4829 adapter->if_handle,
4830 mode == BRIDGE_MODE_VEPA ?
4831 PORT_FWD_TYPE_VEPA :
4832 PORT_FWD_TYPE_VEB);
4833 if (status)
4834 goto err;
4835
4836 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4837 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4838
4839 return status;
4840 }
4841err:
4842 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4843 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4844
4845 return status;
4846}
4847
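/* Editor's note: a typical way to reach the ndo above from user space on
 * an SR-IOV enabled port (illustrative iproute2 invocation, not taken
 * from this file):
 *
 *   bridge link set dev eth0 hwmode vepa   # -> PORT_FWD_TYPE_VEPA
 *   bridge link set dev eth0 hwmode veb    # -> PORT_FWD_TYPE_VEB
 *
 * The requested mode arrives here via RTM_SETLINK as an IFLA_BRIDGE_MODE
 * attribute nested in IFLA_AF_SPEC.
 */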
4848static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4849 struct net_device *dev, u32 filter_mask,
4850 int nlflags)
4851{
4852 struct be_adapter *adapter = netdev_priv(dev);
4853 int status = 0;
4854 u8 hsw_mode;
4855
4856 if (!sriov_enabled(adapter))
4857 return 0;
4858
4859 /* BE and Lancer chips support VEB mode only */
4860 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4861 hsw_mode = PORT_FWD_TYPE_VEB;
4862 } else {
4863 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4864 adapter->if_handle, &hsw_mode);
4865 if (status)
4866 return 0;
4867 }
4868
4869 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4870 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4871 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
46c264da 4872 0, 0, nlflags);
4873}
4874
c5abe7c0 4875#ifdef CONFIG_BE2NET_VXLAN
4876/* VxLAN offload Notes:
4877 *
4878 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4879 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4880 * is expected to work across all types of IP tunnels once exported. Skyhawk
4881 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4882 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4883 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4884 * those other tunnels are unexported on the fly through ndo_features_check().
4885 *
4886 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4887 * adds more than one port, disable offloads and don't re-enable them again
4888 * until after all the tunnels are removed.
4889 */
4890static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4891 __be16 port)
4892{
4893 struct be_adapter *adapter = netdev_priv(netdev);
4894 struct device *dev = &adapter->pdev->dev;
4895 int status;
4896
4897 if (lancer_chip(adapter) || BEx_chip(adapter))
4898 return;
4899
4900 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4901 dev_info(dev,
4902 "Only one UDP port supported for VxLAN offloads\n");
4903 dev_info(dev, "Disabling VxLAN offloads\n");
4904 adapter->vxlan_port_count++;
4905 goto err;
4906 }
4907
4908 if (adapter->vxlan_port_count++ >= 1)
4909 return;
4910
4911 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4912 OP_CONVERT_NORMAL_TO_TUNNEL);
4913 if (status) {
4914 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4915 goto err;
4916 }
4917
4918 status = be_cmd_set_vxlan_port(adapter, port);
4919 if (status) {
4920 dev_warn(dev, "Failed to add VxLAN port\n");
4921 goto err;
4922 }
4923 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4924 adapter->vxlan_port = port;
4925
4926 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4927 NETIF_F_TSO | NETIF_F_TSO6 |
4928 NETIF_F_GSO_UDP_TUNNEL;
4929 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4930 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4931
4932 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4933 be16_to_cpu(port));
4934 return;
4935err:
4936 be_disable_vxlan_offloads(adapter);
4937}
4938
4939static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4940 __be16 port)
4941{
4942 struct be_adapter *adapter = netdev_priv(netdev);
4943
4944 if (lancer_chip(adapter) || BEx_chip(adapter))
4945 return;
4946
4947 if (adapter->vxlan_port != port)
630f4b70 4948 goto done;
4949
4950 be_disable_vxlan_offloads(adapter);
4951
4952 dev_info(&adapter->pdev->dev,
4953 "Disabled VxLAN offloads for UDP port %d\n",
4954 be16_to_cpu(port));
4955done:
4956 adapter->vxlan_port_count--;
c9c47142 4957}
725d548f 4958
4959static netdev_features_t be_features_check(struct sk_buff *skb,
4960 struct net_device *dev,
4961 netdev_features_t features)
725d548f 4962{
4963 struct be_adapter *adapter = netdev_priv(dev);
4964 u8 l4_hdr = 0;
4965
4966 /* The code below restricts offload features for some tunneled packets.
 4967 * Offload features for normal (non-tunnel) packets are unchanged.
4968 */
4969 if (!skb->encapsulation ||
4970 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4971 return features;
4972
4973 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4974 * should disable tunnel offload features if it's not a VxLAN packet,
4975 * as tunnel offloads have been enabled only for VxLAN. This is done to
 4976 * allow other tunneled traffic like GRE to work fine while VxLAN
4977 * offloads are configured in Skyhawk-R.
4978 */
4979 switch (vlan_get_protocol(skb)) {
4980 case htons(ETH_P_IP):
4981 l4_hdr = ip_hdr(skb)->protocol;
4982 break;
4983 case htons(ETH_P_IPV6):
4984 l4_hdr = ipv6_hdr(skb)->nexthdr;
4985 break;
4986 default:
4987 return features;
4988 }
4989
4990 if (l4_hdr != IPPROTO_UDP ||
4991 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4992 skb->inner_protocol != htons(ETH_P_TEB) ||
4993 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4994 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4995 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4996
4997 return features;
725d548f 4998}
c5abe7c0 4999#endif
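/* Editor's note: the inner/transport header distance checked in
 * be_features_check() above is exactly the VxLAN encap overhead between
 * the outer UDP header and the inner Ethernet header:
 *   sizeof(struct udphdr) + sizeof(struct vxlanhdr) = 8 + 8 = 16 bytes
 * Any other framing (GRE, tunnels with option headers, etc.) falls back
 * to software checksumming/GSO via the
 * ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK) mask.
 */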
c9c47142 5000
e5686ad8 5001static const struct net_device_ops be_netdev_ops = {
5002 .ndo_open = be_open,
5003 .ndo_stop = be_close,
5004 .ndo_start_xmit = be_xmit,
a54769f5 5005 .ndo_set_rx_mode = be_set_rx_mode,
5006 .ndo_set_mac_address = be_mac_addr_set,
5007 .ndo_change_mtu = be_change_mtu,
ab1594e9 5008 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 5009 .ndo_validate_addr = eth_validate_addr,
5010 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5011 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 5012 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 5013 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 5014 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 5015 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 5016 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
5017#ifdef CONFIG_NET_POLL_CONTROLLER
5018 .ndo_poll_controller = be_netpoll,
5019#endif
a77dcb8c
AK
5020 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5021 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 5022#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 5023 .ndo_busy_poll = be_busy_poll,
6384a4d0 5024#endif
c5abe7c0 5025#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
5026 .ndo_add_vxlan_port = be_add_vxlan_port,
5027 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 5028 .ndo_features_check = be_features_check,
c5abe7c0 5029#endif
6b7c5b94
SP
5030};
5031
5032static void be_netdev_init(struct net_device *netdev)
5033{
5034 struct be_adapter *adapter = netdev_priv(netdev);
5035
6332c8d3 5036 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 5037 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 5038 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
5039 if (be_multi_rxq(adapter))
5040 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
5041
5042 netdev->features |= netdev->hw_features |
f646968f 5043 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 5044
eb8a50d9 5045 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 5046 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 5047
fbc13f01
AK
5048 netdev->priv_flags |= IFF_UNICAST_FLT;
5049
6b7c5b94
SP
5050 netdev->flags |= IFF_MULTICAST;
5051
b7e5887e 5052 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 5053
10ef9ab4 5054 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 5055
7ad24ea4 5056 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
5057}
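/* The three masks set above follow the usual netdev semantics:
 * hw_features is what the user may toggle at runtime (e.g.
 * "ethtool -K eth0 rx off"), features is what is currently enabled,
 * and vlan_features is inherited by VLAN devices stacked on this
 * interface.
 */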
5058
87ac1a52
KA
5059static void be_cleanup(struct be_adapter *adapter)
5060{
5061 struct net_device *netdev = adapter->netdev;
5062
5063 rtnl_lock();
5064 netif_device_detach(netdev);
5065 if (netif_running(netdev))
5066 be_close(netdev);
5067 rtnl_unlock();
5068
5069 be_clear(adapter);
5070}
5071
484d76fd 5072static int be_resume(struct be_adapter *adapter)
78fad34e 5073{
d0e1b319 5074 struct net_device *netdev = adapter->netdev;
78fad34e
SP
5075 int status;
5076
78fad34e
SP
5077 status = be_setup(adapter);
5078 if (status)
484d76fd 5079 return status;
78fad34e 5080
d0e1b319
KA
5081 if (netif_running(netdev)) {
5082 status = be_open(netdev);
78fad34e 5083 if (status)
484d76fd 5084 return status;
78fad34e
SP
5085 }
5086
d0e1b319
KA
5087 netif_device_attach(netdev);
5088
484d76fd
KA
5089 return 0;
5090}
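/* be_cleanup()/be_resume() bracket every teardown-and-rebuild cycle in
 * this file: PM suspend/resume (be_suspend/be_pci_resume), the error
 * detection worker (be_err_detection_task) and EEH recovery
 * (be_eeh_err_detected/be_eeh_resume) all quiesce via be_cleanup() and
 * rebuild via be_setup()/be_open() through be_resume().
 */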
5091
5092static int be_err_recover(struct be_adapter *adapter)
5093{
5094 struct device *dev = &adapter->pdev->dev;
5095 int status;
5096
5097 status = be_resume(adapter);
5098 if (status)
5099 goto err;
5100
9fa465c0 5101 dev_info(dev, "Adapter recovery successful\n");
78fad34e
SP
5102 return 0;
5103err:
9fa465c0 5104 if (be_physfn(adapter))
78fad34e 5105 dev_err(dev, "Adapter recovery failed\n");
9fa465c0
SP
5106 else
 5107 		dev_err(dev, "Retrying adapter recovery\n");
78fad34e
SP
5108
5109 return status;
5110}
5111
eb7dd46c 5112static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
5113{
5114 struct be_adapter *adapter =
eb7dd46c
SP
5115 container_of(work, struct be_adapter,
5116 be_err_detection_work.work);
78fad34e
SP
5117 int status = 0;
5118
5119 be_detect_error(adapter);
5120
d0e1b319 5121 if (adapter->hw_error) {
87ac1a52 5122 be_cleanup(adapter);
d0e1b319
KA
5123
5124 /* As of now error recovery support is in Lancer only */
5125 if (lancer_chip(adapter))
5126 status = be_err_recover(adapter);
78fad34e
SP
5127 }
5128
9fa465c0
SP
5129 /* Always attempt recovery on VFs */
5130 if (!status || be_virtfn(adapter))
eb7dd46c 5131 be_schedule_err_detection(adapter);
78fad34e
SP
5132}
5133
5134static void be_log_sfp_info(struct be_adapter *adapter)
5135{
5136 int status;
5137
5138 status = be_cmd_query_sfp_info(adapter);
5139 if (!status) {
5140 dev_err(&adapter->pdev->dev,
5141 "Unqualified SFP+ detected on %c from %s part no: %s",
5142 adapter->port_name, adapter->phy.vendor_name,
5143 adapter->phy.vendor_pn);
5144 }
5145 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5146}
5147
5148static void be_worker(struct work_struct *work)
5149{
5150 struct be_adapter *adapter =
5151 container_of(work, struct be_adapter, work.work);
5152 struct be_rx_obj *rxo;
5153 int i;
5154
 5155 	/* When interrupts are not yet enabled, just reap any pending
 5156 	 * MCC completions.
 5157 	 */
5158 if (!netif_running(adapter->netdev)) {
5159 local_bh_disable();
5160 be_process_mcc(adapter);
5161 local_bh_enable();
5162 goto reschedule;
5163 }
5164
5165 if (!adapter->stats_cmd_sent) {
5166 if (lancer_chip(adapter))
5167 lancer_cmd_get_pport_stats(adapter,
5168 &adapter->stats_cmd);
5169 else
5170 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5171 }
5172
5173 if (be_physfn(adapter) &&
5174 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5175 be_cmd_get_die_temperature(adapter);
5176
5177 for_all_rx_queues(adapter, rxo, i) {
5178 /* Replenish RX-queues starved due to memory
5179 * allocation failures.
5180 */
5181 if (rxo->rx_post_starved)
5182 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5183 }
5184
5185 be_eqd_update(adapter);
5186
5187 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5188 be_log_sfp_info(adapter);
5189
5190reschedule:
5191 adapter->work_counter++;
5192 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5193}
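/* Cadence: the worker above reschedules itself every 1000 ms, so
 * statistics are refreshed at roughly 1 Hz, and with
 * be_get_temp_freq == 64 the PF queries the die temperature about
 * once a minute.
 */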
5194
6b7c5b94
SP
5195static void be_unmap_pci_bars(struct be_adapter *adapter)
5196{
c5b3ad4c
SP
5197 if (adapter->csr)
5198 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 5199 if (adapter->db)
ce66f781 5200 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
5201}
5202
ce66f781
SP
5203static int db_bar(struct be_adapter *adapter)
5204{
5205 if (lancer_chip(adapter) || !be_physfn(adapter))
5206 return 0;
5207 else
5208 return 4;
5209}
5210
5211static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 5212{
dbf0f2a7 5213 if (skyhawk_chip(adapter)) {
ce66f781
SP
5214 adapter->roce_db.size = 4096;
5215 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5216 db_bar(adapter));
5217 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5218 db_bar(adapter));
5219 }
045508a8 5220 return 0;
6b7c5b94
SP
5221}
5222
5223static int be_map_pci_bars(struct be_adapter *adapter)
5224{
0fa74a4b 5225 struct pci_dev *pdev = adapter->pdev;
6b7c5b94 5226 u8 __iomem *addr;
78fad34e
SP
5227 u32 sli_intf;
5228
5229 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5230 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5231 SLI_INTF_FAMILY_SHIFT;
5232 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38 5233
c5b3ad4c 5234 if (BEx_chip(adapter) && be_physfn(adapter)) {
0fa74a4b 5235 adapter->csr = pci_iomap(pdev, 2, 0);
ddf1169f 5236 if (!adapter->csr)
c5b3ad4c
SP
5237 return -ENOMEM;
5238 }
5239
25848c90 5240 addr = pci_iomap(pdev, db_bar(adapter), 0);
ddf1169f 5241 if (!addr)
6b7c5b94 5242 goto pci_map_err;
ba343c77 5243 adapter->db = addr;
ce66f781 5244
25848c90
SR
5245 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5246 if (be_physfn(adapter)) {
5247 /* PCICFG is the 2nd BAR in BE2 */
5248 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5249 if (!addr)
5250 goto pci_map_err;
5251 adapter->pcicfg = addr;
5252 } else {
5253 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5254 }
5255 }
5256
ce66f781 5257 be_roce_map_pci_bars(adapter);
6b7c5b94 5258 return 0;
ce66f781 5259
6b7c5b94 5260pci_map_err:
25848c90 5261 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
5262 be_unmap_pci_bars(adapter);
5263 return -ENOMEM;
5264}
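/* BAR usage as encoded by db_bar()/be_map_pci_bars() above (a summary
 * derived from the code, not from a datasheet):
 *
 *   CSR:    BAR 2 - BE2/BE3 PF only
 *   DB:     BAR 0 on Lancer and on VFs, BAR 4 otherwise
 *   PCICFG: BAR 1 on BE2 PF, BAR 0 on BE3/Skyhawk PF,
 *           DB + SRIOV_VF_PCICFG_OFFSET on VFs
 */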
5265
78fad34e 5266static void be_drv_cleanup(struct be_adapter *adapter)
6b7c5b94 5267{
8788fdc2 5268 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
78fad34e 5269 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
5270
5271 if (mem->va)
78fad34e 5272 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
e7b909a6 5273
5b8821b7 5274 mem = &adapter->rx_filter;
e7b909a6 5275 if (mem->va)
78fad34e
SP
5276 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5277
5278 mem = &adapter->stats_cmd;
5279 if (mem->va)
5280 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
6b7c5b94
SP
5281}
5282
78fad34e
SP
5283/* Allocate and initialize various fields in be_adapter struct */
5284static int be_drv_init(struct be_adapter *adapter)
6b7c5b94 5285{
8788fdc2
SP
5286 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5287 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 5288 struct be_dma_mem *rx_filter = &adapter->rx_filter;
78fad34e
SP
5289 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5290 struct device *dev = &adapter->pdev->dev;
5291 int status = 0;
6b7c5b94
SP
5292
5293 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
78fad34e 5294 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
2b7bcebf
IV
5295 &mbox_mem_alloc->dma,
5296 GFP_KERNEL);
78fad34e
SP
5297 if (!mbox_mem_alloc->va)
5298 return -ENOMEM;
5299
6b7c5b94
SP
5300 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5301 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5302 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5303 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
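	/* The +16 over-allocation together with PTR_ALIGN() above yields a
	 * 16-byte-aligned mailbox that is guaranteed to fit: e.g. a va
	 * ending in 0x...8 is rounded up by 8 bytes, still leaving
	 * sizeof(struct be_mcc_mailbox) usable bytes in the buffer.
	 */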
e7b909a6 5304
5b8821b7 5305 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
78fad34e
SP
5306 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5307 &rx_filter->dma, GFP_KERNEL);
ddf1169f 5308 if (!rx_filter->va) {
e7b909a6
SP
5309 status = -ENOMEM;
5310 goto free_mbox;
5311 }
1f9061d2 5312
78fad34e
SP
5313 if (lancer_chip(adapter))
5314 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5315 else if (BE2_chip(adapter))
5316 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5317 else if (BE3_chip(adapter))
5318 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5319 else
5320 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5321 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5322 &stats_cmd->dma, GFP_KERNEL);
5323 if (!stats_cmd->va) {
5324 status = -ENOMEM;
5325 goto free_rx_filter;
5326 }
5327
2984961c 5328 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
5329 spin_lock_init(&adapter->mcc_lock);
5330 spin_lock_init(&adapter->mcc_cq_lock);
5eeff635 5331 init_completion(&adapter->et_cmd_compl);
e7b909a6 5332
78fad34e 5333 pci_save_state(adapter->pdev);
6b7c5b94 5334
78fad34e 5335 INIT_DELAYED_WORK(&adapter->work, be_worker);
eb7dd46c
SP
5336 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5337 be_err_detection_task);
6b7c5b94 5338
78fad34e
SP
5339 adapter->rx_fc = true;
5340 adapter->tx_fc = true;
6b7c5b94 5341
78fad34e
SP
5342 /* Must be a power of 2 or else MODULO will BUG_ON */
5343 adapter->be_get_temp_freq = 64;
ca34fe38 5344
6b7c5b94 5345 return 0;
78fad34e
SP
5346
5347free_rx_filter:
5348 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5349free_mbox:
5350 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5351 mbox_mem_alloc->dma);
5352 return status;
6b7c5b94
SP
5353}
5354
3bc6b06c 5355static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
5356{
5357 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 5358
6b7c5b94
SP
5359 if (!adapter)
5360 return;
5361
045508a8 5362 be_roce_dev_remove(adapter);
8cef7a78 5363 be_intr_set(adapter, false);
045508a8 5364
eb7dd46c 5365 be_cancel_err_detection(adapter);
f67ef7ba 5366
6b7c5b94
SP
5367 unregister_netdev(adapter->netdev);
5368
5fb379ee
SP
5369 be_clear(adapter);
5370
bf99e50d
PR
 5371 	/* Tell FW we're done issuing commands */
5372 be_cmd_fw_clean(adapter);
5373
78fad34e
SP
5374 be_unmap_pci_bars(adapter);
5375 be_drv_cleanup(adapter);
6b7c5b94 5376
d6b6d987
SP
5377 pci_disable_pcie_error_reporting(pdev);
5378
6b7c5b94
SP
5379 pci_release_regions(pdev);
5380 pci_disable_device(pdev);
5381
5382 free_netdev(adapter->netdev);
5383}
5384
d379142b
SP
5385static char *mc_name(struct be_adapter *adapter)
5386{
f93f160b
VV
5387 char *str = ""; /* default */
5388
5389 switch (adapter->mc_type) {
5390 case UMC:
5391 str = "UMC";
5392 break;
5393 case FLEX10:
5394 str = "FLEX10";
5395 break;
5396 case vNIC1:
5397 str = "vNIC-1";
5398 break;
5399 case nPAR:
5400 str = "nPAR";
5401 break;
5402 case UFP:
5403 str = "UFP";
5404 break;
5405 case vNIC2:
5406 str = "vNIC-2";
5407 break;
5408 default:
5409 str = "";
5410 }
5411
5412 return str;
d379142b
SP
5413}
5414
5415static inline char *func_name(struct be_adapter *adapter)
5416{
5417 return be_physfn(adapter) ? "PF" : "VF";
5418}
5419
f7062ee5
SP
5420static inline char *nic_name(struct pci_dev *pdev)
5421{
5422 switch (pdev->device) {
5423 case OC_DEVICE_ID1:
5424 return OC_NAME;
5425 case OC_DEVICE_ID2:
5426 return OC_NAME_BE;
5427 case OC_DEVICE_ID3:
5428 case OC_DEVICE_ID4:
5429 return OC_NAME_LANCER;
5430 case BE_DEVICE_ID2:
5431 return BE3_NAME;
5432 case OC_DEVICE_ID5:
5433 case OC_DEVICE_ID6:
5434 return OC_NAME_SH;
5435 default:
5436 return BE_NAME;
5437 }
5438}
5439
1dd06ae8 5440static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94 5441{
6b7c5b94
SP
5442 struct be_adapter *adapter;
5443 struct net_device *netdev;
21252377 5444 int status = 0;
6b7c5b94 5445
acbafeb1
SP
5446 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5447
6b7c5b94
SP
5448 status = pci_enable_device(pdev);
5449 if (status)
5450 goto do_none;
5451
5452 status = pci_request_regions(pdev, DRV_NAME);
5453 if (status)
5454 goto disable_dev;
5455 pci_set_master(pdev);
5456
7f640062 5457 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5458 if (!netdev) {
6b7c5b94
SP
5459 status = -ENOMEM;
5460 goto rel_reg;
5461 }
5462 adapter = netdev_priv(netdev);
5463 adapter->pdev = pdev;
5464 pci_set_drvdata(pdev, adapter);
5465 adapter->netdev = netdev;
2243e2e9 5466 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5467
4c15c243 5468 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
5469 if (!status) {
5470 netdev->features |= NETIF_F_HIGHDMA;
5471 } else {
4c15c243 5472 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
5473 if (status) {
5474 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5475 goto free_netdev;
5476 }
5477 }
5478
2f951a9a
KA
5479 status = pci_enable_pcie_error_reporting(pdev);
5480 if (!status)
5481 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5482
78fad34e 5483 status = be_map_pci_bars(adapter);
6b7c5b94 5484 if (status)
39f1d94d 5485 goto free_netdev;
6b7c5b94 5486
78fad34e
SP
5487 status = be_drv_init(adapter);
5488 if (status)
5489 goto unmap_bars;
5490
5fb379ee
SP
5491 status = be_setup(adapter);
5492 if (status)
78fad34e 5493 goto drv_cleanup;
2243e2e9 5494
3abcdeda 5495 be_netdev_init(netdev);
6b7c5b94
SP
5496 status = register_netdev(netdev);
5497 if (status != 0)
5fb379ee 5498 goto unsetup;
6b7c5b94 5499
045508a8
PP
5500 be_roce_dev_add(adapter);
5501
eb7dd46c 5502 be_schedule_err_detection(adapter);
b4e32a71 5503
d379142b 5504 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
21252377 5505 func_name(adapter), mc_name(adapter), adapter->port_name);
34b1ef04 5506
6b7c5b94
SP
5507 return 0;
5508
5fb379ee
SP
5509unsetup:
5510 be_clear(adapter);
78fad34e
SP
5511drv_cleanup:
5512 be_drv_cleanup(adapter);
5513unmap_bars:
5514 be_unmap_pci_bars(adapter);
f9449ab7 5515free_netdev:
fe6d2a38 5516 free_netdev(netdev);
6b7c5b94
SP
5517rel_reg:
5518 pci_release_regions(pdev);
5519disable_dev:
5520 pci_disable_device(pdev);
5521do_none:
c4ca2374 5522 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5523 return status;
5524}
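/* Probe sequence at a glance (the unwind labels above mirror it in
 * reverse): enable PCI device -> request regions -> alloc_etherdev_mqs
 * -> set DMA mask -> enable AER -> be_map_pci_bars() -> be_drv_init()
 * -> be_setup() -> register_netdev() -> RoCE add -> schedule error
 * detection.
 */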
5525
5526static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5527{
5528 struct be_adapter *adapter = pci_get_drvdata(pdev);
6b7c5b94 5529
76a9e08e 5530 if (adapter->wol_en)
71d8d1b5
AK
5531 be_setup_wol(adapter, true);
5532
d4360d6f 5533 be_intr_set(adapter, false);
eb7dd46c 5534 be_cancel_err_detection(adapter);
f67ef7ba 5535
87ac1a52 5536 be_cleanup(adapter);
6b7c5b94
SP
5537
5538 pci_save_state(pdev);
5539 pci_disable_device(pdev);
5540 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5541 return 0;
5542}
5543
484d76fd 5544static int be_pci_resume(struct pci_dev *pdev)
6b7c5b94 5545{
6b7c5b94 5546 struct be_adapter *adapter = pci_get_drvdata(pdev);
484d76fd 5547 int status = 0;
6b7c5b94
SP
5548
5549 status = pci_enable_device(pdev);
5550 if (status)
5551 return status;
5552
1ca01512 5553 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5554 pci_restore_state(pdev);
5555
484d76fd 5556 status = be_resume(adapter);
2243e2e9
SP
5557 if (status)
5558 return status;
5559
eb7dd46c
SP
5560 be_schedule_err_detection(adapter);
5561
76a9e08e 5562 if (adapter->wol_en)
71d8d1b5 5563 be_setup_wol(adapter, false);
a4ca055f 5564
6b7c5b94
SP
5565 return 0;
5566}
5567
82456b03
SP
5568/*
5569 * An FLR will stop BE from DMAing any data.
5570 */
5571static void be_shutdown(struct pci_dev *pdev)
5572{
5573 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5574
2d5d4154
AK
5575 if (!adapter)
5576 return;
82456b03 5577
d114f99a 5578 be_roce_dev_shutdown(adapter);
0f4a6828 5579 cancel_delayed_work_sync(&adapter->work);
eb7dd46c 5580 be_cancel_err_detection(adapter);
a4ca055f 5581
2d5d4154 5582 netif_device_detach(adapter->netdev);
82456b03 5583
57841869
AK
5584 be_cmd_reset_function(adapter);
5585
82456b03 5586 pci_disable_device(pdev);
82456b03
SP
5587}
5588
cf588477 5589static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5590 pci_channel_state_t state)
cf588477
SP
5591{
5592 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5593
5594 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5595
01e5b2c4
SK
5596 if (!adapter->eeh_error) {
5597 adapter->eeh_error = true;
cf588477 5598
eb7dd46c 5599 be_cancel_err_detection(adapter);
cf588477 5600
87ac1a52 5601 be_cleanup(adapter);
cf588477 5602 }
cf588477
SP
5603
5604 if (state == pci_channel_io_perm_failure)
5605 return PCI_ERS_RESULT_DISCONNECT;
5606
5607 pci_disable_device(pdev);
5608
eeb7fc7b
SK
5609 /* The error could cause the FW to trigger a flash debug dump.
5610 * Resetting the card while flash dump is in progress
c8a54163
PR
5611 * can cause it not to recover; wait for it to finish.
 5612 	 * Wait only for the first function, as the wait is needed only
 5613 	 * once per adapter.
eeb7fc7b 5614 */
c8a54163
PR
5615 if (pdev->devfn == 0)
5616 ssleep(30);
5617
cf588477
SP
5618 return PCI_ERS_RESULT_NEED_RESET;
5619}
5620
5621static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5622{
5623 struct be_adapter *adapter = pci_get_drvdata(pdev);
5624 int status;
5625
5626 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5627
5628 status = pci_enable_device(pdev);
5629 if (status)
5630 return PCI_ERS_RESULT_DISCONNECT;
5631
5632 pci_set_master(pdev);
1ca01512 5633 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5634 pci_restore_state(pdev);
5635
5636 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5637 dev_info(&adapter->pdev->dev,
5638 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5639 status = be_fw_wait_ready(adapter);
cf588477
SP
5640 if (status)
5641 return PCI_ERS_RESULT_DISCONNECT;
5642
d6b6d987 5643 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5644 be_clear_all_error(adapter);
cf588477
SP
5645 return PCI_ERS_RESULT_RECOVERED;
5646}
5647
5648static void be_eeh_resume(struct pci_dev *pdev)
5649{
5650 int status = 0;
5651 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5652
5653 dev_info(&adapter->pdev->dev, "EEH resume\n");
5654
5655 pci_save_state(pdev);
5656
484d76fd 5657 status = be_resume(adapter);
bf99e50d
PR
5658 if (status)
5659 goto err;
5660
eb7dd46c 5661 be_schedule_err_detection(adapter);
cf588477
SP
5662 return;
5663err:
5664 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5665}
5666
ace40aff
VV
5667static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5668{
5669 struct be_adapter *adapter = pci_get_drvdata(pdev);
5670 u16 num_vf_qs;
5671 int status;
5672
5673 if (!num_vfs)
5674 be_vf_clear(adapter);
5675
5676 adapter->num_vfs = num_vfs;
5677
5678 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5679 dev_warn(&pdev->dev,
5680 "Cannot disable VFs while they are assigned\n");
5681 return -EBUSY;
5682 }
5683
 5684 	/* When the HW is in an SR-IOV capable configuration, the PF-pool
 5685 	 * resources are distributed equally across the maximum number of
 5686 	 * VFs. The user may request that only a subset of the max VFs be
 5687 	 * enabled. Based on num_vfs, redistribute the resources across
 5688 	 * num_vfs so that each VF gets access to a larger share of them.
 5689 	 * This facility is not available in BE3 FW; on Lancer it is done
 5690 	 * by the FW.
 5691 	 */
5692 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5693 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5694 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5695 adapter->num_vfs, num_vf_qs);
5696 if (status)
5697 dev_err(&pdev->dev,
5698 "Failed to optimize SR-IOV resources\n");
5699 }
5700
5701 status = be_get_resources(adapter);
5702 if (status)
5703 return be_cmd_status(status);
5704
5705 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5706 rtnl_lock();
5707 status = be_update_queues(adapter);
5708 rtnl_unlock();
5709 if (status)
5710 return be_cmd_status(status);
5711
5712 if (adapter->num_vfs)
5713 status = be_vf_setup(adapter);
5714
5715 if (!status)
5716 return adapter->num_vfs;
5717
5718 return 0;
5719}
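/* Usage sketch for .sriov_configure (standard sysfs path; the BDF is
 * illustrative):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *       -> be_pci_sriov_configure(pdev, 4)
 *   echo 0 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *       -> be_pci_sriov_configure(pdev, 0)
 */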
5720
3646f0e5 5721static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5722 .error_detected = be_eeh_err_detected,
5723 .slot_reset = be_eeh_reset,
5724 .resume = be_eeh_resume,
5725};
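/* EEH recovery sequence, as driven by the PCI core through the
 * handlers above:
 *   1. be_eeh_err_detected() - detach and clean up, request a reset
 *   2. be_eeh_reset()        - after the slot reset, re-enable the
 *                              device and wait for FW readiness
 *   3. be_eeh_resume()       - be_resume() and restart error detection
 */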
5726
6b7c5b94
SP
5727static struct pci_driver be_driver = {
5728 .name = DRV_NAME,
5729 .id_table = be_dev_ids,
5730 .probe = be_probe,
5731 .remove = be_remove,
5732 .suspend = be_suspend,
484d76fd 5733 .resume = be_pci_resume,
82456b03 5734 .shutdown = be_shutdown,
ace40aff 5735 .sriov_configure = be_pci_sriov_configure,
cf588477 5736 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5737};
5738
5739static int __init be_init_module(void)
5740{
8e95a202
JP
5741 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5742 rx_frag_size != 2048) {
6b7c5b94
SP
5743 printk(KERN_WARNING DRV_NAME
5744 " : Module param rx_frag_size must be 2048/4096/8192."
5745 " Using 2048\n");
5746 rx_frag_size = 2048;
5747 }
6b7c5b94 5748
ace40aff
VV
5749 if (num_vfs > 0) {
 5750 		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
 5751 		pr_info(DRV_NAME " : Use the sysfs method to enable VFs\n");
5752 }
5753
6b7c5b94
SP
5754 return pci_register_driver(&be_driver);
5755}
5756module_init(be_init_module);
5757
5758static void __exit be_exit_module(void)
5759{
5760 pci_unregister_driver(&be_driver);
5761}
5762module_exit(be_exit_module);