be2net: re-distribute SRIOV resources allowed by FW
[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

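/* Free the DMA-coherent memory backing a queue, if it was allocated */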
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

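/* Allocate zeroed DMA-coherent memory for a queue of 'len' entries */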
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

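/* Enable/disable host interrupts via the membar interrupt-control register
 * in PCI config space
 */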
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

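/* Ring the RQ doorbell to post 'posted' receive buffers to HW */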
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

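/* Ring the TX doorbell to notify HW of newly posted WRBs */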
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

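/* Ring the EQ doorbell: optionally re-arm the EQ and/or clear the interrupt,
 * acknowledging 'num_popped' event entries
 */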
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

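/* Ring the CQ doorbell: optionally re-arm the CQ, acknowledging
 * 'num_popped' processed completions
 */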
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

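/* Set a new primary MAC: program it via FW and confirm activation by
 * querying the FW before updating the netdev address
 */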
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

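/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit
 * SW counter
 */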
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

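/* Aggregate per-queue SW stats and FW-reported error counters into
 * the rtnl_link_stats64 structure
 */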
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

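/* Update netif carrier state based on the link status reported by FW */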
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

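/* Return the skb's vlan tag, remapping its priority to the recommended
 * value when the OS-provided priority is not in the available bitmap
 */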
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

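/* Derive per-packet WRB features (LSO, csum offload, vlan) from the skb */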
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

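/* Fill the Tx header WRB from the features computed in
 * be_get_wrb_params_from_skb()
 */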
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

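/* Unmap the DMA address programmed in a Tx WRB (single or page mapping) */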
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

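/* Insert the vlan tag (and the QnQ outer tag, if configured) into the
 * packet in SW, as a workaround for HW vlan-tagging issues
 */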
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

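/* Workarounds for BEx/Lancer HW issues with padded, vlan-tagged and certain
 * ipv6 packets; may modify the skb or drop it (returning NULL)
 */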
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

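/* Notify HW of all pending WRBs on this queue; pads with a dummy WRB first
 * if the pending count is odd (non-Lancer chips)
 */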
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

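/* Transmit entry point: applies workarounds, enqueues the skb and flushes
 * the queue when no more packets are expected (xmit_more not set)
 */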
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

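/* Sync the interface's promiscuous, multicast and unicast filter state
 * with HW
 */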
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

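/* Adaptive interrupt coalescing: recompute each EQ's delay from the
 * observed rx/tx packet rates and program the new values via FW
 */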
2632bafd
SP
1591static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1592 ulong now)
6b7c5b94 1593{
2632bafd
SP
1594 aic->rx_pkts_prev = rx_pkts;
1595 aic->tx_reqs_prev = tx_pkts;
1596 aic->jiffies = now;
1597}
ac124ff9 1598
2632bafd
SP
1599static void be_eqd_update(struct be_adapter *adapter)
1600{
1601 struct be_set_eqd set_eqd[MAX_EVT_QS];
1602 int eqd, i, num = 0, start;
1603 struct be_aic_obj *aic;
1604 struct be_eq_obj *eqo;
1605 struct be_rx_obj *rxo;
1606 struct be_tx_obj *txo;
1607 u64 rx_pkts, tx_pkts;
1608 ulong now;
1609 u32 pps, delta;
10ef9ab4 1610
2632bafd
SP
1611 for_all_evt_queues(adapter, eqo, i) {
1612 aic = &adapter->aic_obj[eqo->idx];
1613 if (!aic->enable) {
1614 if (aic->jiffies)
1615 aic->jiffies = 0;
1616 eqd = aic->et_eqd;
1617 goto modify_eqd;
1618 }
6b7c5b94 1619
2632bafd
SP
1620 rxo = &adapter->rx_obj[eqo->idx];
1621 do {
57a7744e 1622 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1623 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1624 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1625
2632bafd
SP
1626 txo = &adapter->tx_obj[eqo->idx];
1627 do {
57a7744e 1628 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1629 tx_pkts = txo->stats.tx_reqs;
57a7744e 1630 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1631
2632bafd
SP
 1632 /* Skip if wrapped around or this is the first calculation */
1633 now = jiffies;
1634 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1635 rx_pkts < aic->rx_pkts_prev ||
1636 tx_pkts < aic->tx_reqs_prev) {
1637 be_aic_update(aic, rx_pkts, tx_pkts, now);
1638 continue;
1639 }
1640
1641 delta = jiffies_to_msecs(now - aic->jiffies);
1642 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1643 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1644 eqd = (pps / 15000) << 2;
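 /* Rough mapping: e.g. an aggregate 150k pkts/s gives
  * eqd = (150000 / 15000) << 2 = 40; below ~30k pkts/s the result
  * drops under 8 and is zeroed below, disabling coalescing.
  */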
10ef9ab4 1645
2632bafd
SP
1646 if (eqd < 8)
1647 eqd = 0;
1648 eqd = min_t(u32, eqd, aic->max_eqd);
1649 eqd = max_t(u32, eqd, aic->min_eqd);
1650
1651 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1652modify_eqd:
2632bafd
SP
1653 if (eqd != aic->prev_eqd) {
1654 set_eqd[num].delay_multiplier = (eqd * 65)/100;
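 /* The 65/100 scaling appears to convert the computed delay into
  * the multiplier units the MODIFY_EQ_DELAY firmware command
  * expects, rather than raw usecs.
  */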
1655 set_eqd[num].eq_id = eqo->q.id;
1656 aic->prev_eqd = eqd;
1657 num++;
1658 }
ac124ff9 1659 }
2632bafd
SP
1660
1661 if (num)
1662 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1663}
1664
3abcdeda 1665static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1666 struct be_rx_compl_info *rxcp)
4097f663 1667{
ac124ff9 1668 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1669
ab1594e9 1670 u64_stats_update_begin(&stats->sync);
3abcdeda 1671 stats->rx_compl++;
2e588f84 1672 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1673 stats->rx_pkts++;
2e588f84 1674 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1675 stats->rx_mcast_pkts++;
2e588f84 1676 if (rxcp->err)
ac124ff9 1677 stats->rx_compl_err++;
ab1594e9 1678 u64_stats_update_end(&stats->sync);
4097f663
SP
1679}
1680
2e588f84 1681static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1682{
19fad86f 1683 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
1684 * Also ignore ipcksm for ipv6 pkts
1685 */
2e588f84 1686 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1687 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1688}
1689
0b0ef1d0 1690static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1691{
10ef9ab4 1692 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1693 struct be_rx_page_info *rx_page_info;
3abcdeda 1694 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1695 u16 frag_idx = rxq->tail;
6b7c5b94 1696
3abcdeda 1697 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1698 BUG_ON(!rx_page_info->page);
1699
e50287be 1700 if (rx_page_info->last_frag) {
2b7bcebf
IV
1701 dma_unmap_page(&adapter->pdev->dev,
1702 dma_unmap_addr(rx_page_info, bus),
1703 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1704 rx_page_info->last_frag = false;
1705 } else {
1706 dma_sync_single_for_cpu(&adapter->pdev->dev,
1707 dma_unmap_addr(rx_page_info, bus),
1708 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1709 }
6b7c5b94 1710
0b0ef1d0 1711 queue_tail_inc(rxq);
6b7c5b94
SP
1712 atomic_dec(&rxq->used);
1713 return rx_page_info;
1714}
1715
 1716/* Throw away the data in the Rx completion */
10ef9ab4
SP
1717static void be_rx_compl_discard(struct be_rx_obj *rxo,
1718 struct be_rx_compl_info *rxcp)
6b7c5b94 1719{
6b7c5b94 1720 struct be_rx_page_info *page_info;
2e588f84 1721 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1722
e80d9da6 1723 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1724 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1725 put_page(page_info->page);
1726 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1727 }
1728}
1729
1730/*
1731 * skb_fill_rx_data forms a complete skb for an ether frame
1732 * indicated by rxcp.
1733 */
10ef9ab4
SP
1734static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1735 struct be_rx_compl_info *rxcp)
6b7c5b94 1736{
6b7c5b94 1737 struct be_rx_page_info *page_info;
2e588f84
SP
1738 u16 i, j;
1739 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1740 u8 *start;
6b7c5b94 1741
0b0ef1d0 1742 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1743 start = page_address(page_info->page) + page_info->page_offset;
1744 prefetch(start);
1745
1746 /* Copy data in the first descriptor of this completion */
2e588f84 1747 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1748
6b7c5b94
SP
1749 skb->len = curr_frag_len;
1750 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1751 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1752 /* Complete packet has now been moved to data */
1753 put_page(page_info->page);
1754 skb->data_len = 0;
1755 skb->tail += curr_frag_len;
1756 } else {
ac1ae5f3
ED
1757 hdr_len = ETH_HLEN;
1758 memcpy(skb->data, start, hdr_len);
6b7c5b94 1759 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1760 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1761 skb_shinfo(skb)->frags[0].page_offset =
1762 page_info->page_offset + hdr_len;
748b539a
SP
1763 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1764 curr_frag_len - hdr_len);
6b7c5b94 1765 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1766 skb->truesize += rx_frag_size;
6b7c5b94
SP
1767 skb->tail += hdr_len;
1768 }
205859a2 1769 page_info->page = NULL;
6b7c5b94 1770
2e588f84
SP
1771 if (rxcp->pkt_size <= rx_frag_size) {
1772 BUG_ON(rxcp->num_rcvd != 1);
1773 return;
6b7c5b94
SP
1774 }
1775
1776 /* More frags present for this completion */
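 /* e.g. with the default 2048-byte rx_frag_size, a 3000-byte frame
  * arrives as two frags: 2048 bytes consumed above, and the
  * remaining 952 attached by the loop below.
  */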
2e588f84
SP
1777 remaining = rxcp->pkt_size - curr_frag_len;
1778 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1779 page_info = get_rx_page_info(rxo);
2e588f84 1780 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1781
bd46cb6c
AK
1782 /* Coalesce all frags from the same physical page in one slot */
1783 if (page_info->page_offset == 0) {
1784 /* Fresh page */
1785 j++;
b061b39e 1786 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1787 skb_shinfo(skb)->frags[j].page_offset =
1788 page_info->page_offset;
9e903e08 1789 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1790 skb_shinfo(skb)->nr_frags++;
1791 } else {
1792 put_page(page_info->page);
1793 }
1794
9e903e08 1795 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1796 skb->len += curr_frag_len;
1797 skb->data_len += curr_frag_len;
bdb28a97 1798 skb->truesize += rx_frag_size;
2e588f84 1799 remaining -= curr_frag_len;
205859a2 1800 page_info->page = NULL;
6b7c5b94 1801 }
bd46cb6c 1802 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1803}
1804
5be93b9a 1805/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1806static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1807 struct be_rx_compl_info *rxcp)
6b7c5b94 1808{
10ef9ab4 1809 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1810 struct net_device *netdev = adapter->netdev;
6b7c5b94 1811 struct sk_buff *skb;
89420424 1812
bb349bb4 1813 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1814 if (unlikely(!skb)) {
ac124ff9 1815 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1816 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1817 return;
1818 }
1819
10ef9ab4 1820 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1821
6332c8d3 1822 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1823 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1824 else
1825 skb_checksum_none_assert(skb);
6b7c5b94 1826
6332c8d3 1827 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1828 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1829 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1830 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1831
b6c0e89d 1832 skb->csum_level = rxcp->tunneled;
6384a4d0 1833 skb_mark_napi_id(skb, napi);
6b7c5b94 1834
343e43c0 1835 if (rxcp->vlanf)
86a9bad3 1836 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1837
1838 netif_receive_skb(skb);
6b7c5b94
SP
1839}
1840
5be93b9a 1841/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1842static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1843 struct napi_struct *napi,
1844 struct be_rx_compl_info *rxcp)
6b7c5b94 1845{
10ef9ab4 1846 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1847 struct be_rx_page_info *page_info;
5be93b9a 1848 struct sk_buff *skb = NULL;
2e588f84
SP
1849 u16 remaining, curr_frag_len;
1850 u16 i, j;
3968fa1e 1851
10ef9ab4 1852 skb = napi_get_frags(napi);
5be93b9a 1853 if (!skb) {
10ef9ab4 1854 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1855 return;
1856 }
1857
2e588f84
SP
1858 remaining = rxcp->pkt_size;
1859 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1860 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1861
1862 curr_frag_len = min(remaining, rx_frag_size);
1863
bd46cb6c
AK
1864 /* Coalesce all frags from the same physical page in one slot */
1865 if (i == 0 || page_info->page_offset == 0) {
1866 /* First frag or Fresh page */
1867 j++;
b061b39e 1868 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1869 skb_shinfo(skb)->frags[j].page_offset =
1870 page_info->page_offset;
9e903e08 1871 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1872 } else {
1873 put_page(page_info->page);
1874 }
9e903e08 1875 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1876 skb->truesize += rx_frag_size;
bd46cb6c 1877 remaining -= curr_frag_len;
6b7c5b94
SP
1878 memset(page_info, 0, sizeof(*page_info));
1879 }
bd46cb6c 1880 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1881
5be93b9a 1882 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1883 skb->len = rxcp->pkt_size;
1884 skb->data_len = rxcp->pkt_size;
5be93b9a 1885 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1886 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1887 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1888 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1889
b6c0e89d 1890 skb->csum_level = rxcp->tunneled;
6384a4d0 1891 skb_mark_napi_id(skb, napi);
5be93b9a 1892
343e43c0 1893 if (rxcp->vlanf)
86a9bad3 1894 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1895
10ef9ab4 1896 napi_gro_frags(napi);
2e588f84
SP
1897}
1898
10ef9ab4
SP
1899static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1900 struct be_rx_compl_info *rxcp)
2e588f84 1901{
c3c18bc1
SP
1902 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1903 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1904 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1905 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1906 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1907 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1908 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1909 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1910 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1911 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1912 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1913 if (rxcp->vlanf) {
c3c18bc1
SP
1914 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1915 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1916 }
c3c18bc1 1917 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1918 rxcp->tunneled =
c3c18bc1 1919 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1920}
1921
10ef9ab4
SP
1922static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1923 struct be_rx_compl_info *rxcp)
2e588f84 1924{
c3c18bc1
SP
1925 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1926 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1927 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1928 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1929 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1930 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1931 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1932 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1933 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1934 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1935 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1936 if (rxcp->vlanf) {
c3c18bc1
SP
1937 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1938 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1939 }
c3c18bc1
SP
1940 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1941 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1942}
1943
1944static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1945{
1946 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1947 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1948 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1949
2e588f84
SP
 1950 /* For checking the valid bit it is OK to use either definition, as the
 1951 * valid bit is at the same position in both v0 and v1 Rx compl */
1952 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1953 return NULL;
6b7c5b94 1954
2e588f84
SP
1955 rmb();
1956 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1957
2e588f84 1958 if (adapter->be3_native)
10ef9ab4 1959 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1960 else
10ef9ab4 1961 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1962
e38b1706
SK
1963 if (rxcp->ip_frag)
1964 rxcp->l4_csum = 0;
1965
15d72184 1966 if (rxcp->vlanf) {
f93f160b
VV
1967 /* In QNQ modes, if qnq bit is not set, then the packet was
1968 * tagged only with the transparent outer vlan-tag and must
1969 * not be treated as a vlan packet by host
1970 */
1971 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1972 rxcp->vlanf = 0;
6b7c5b94 1973
15d72184 1974 if (!lancer_chip(adapter))
3c709f8f 1975 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1976
939cf306 1977 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1978 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1979 rxcp->vlanf = 0;
1980 }
2e588f84
SP
1981
 1982 /* As the compl has been parsed, reset it; we won't touch it again */
1983 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1984
3abcdeda 1985 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1986 return rxcp;
1987}
1988
1829b086 1989static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1990{
6b7c5b94 1991 u32 order = get_order(size);
1829b086 1992
6b7c5b94 1993 if (order > 0)
1829b086
ED
1994 gfp |= __GFP_COMP;
1995 return alloc_pages(gfp, order);
6b7c5b94
SP
1996}
1997
1998/*
1999 * Allocate a page, split it to fragments of size rx_frag_size and post as
2000 * receive buffers to BE
2001 */
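/* Assuming 4K pages and the default 2048-byte rx_frag_size,
 * big_page_size works out to 4096 and each page is carved into two
 * frags; get_page() is taken per extra frag, so the page is freed
 * only once every frag is consumed.
 */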
c30d7266 2002static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2003{
3abcdeda 2004 struct be_adapter *adapter = rxo->adapter;
26d92f92 2005 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2006 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2007 struct page *pagep = NULL;
ba42fad0 2008 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2009 struct be_eth_rx_d *rxd;
2010 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2011 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2012
3abcdeda 2013 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2014 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2015 if (!pagep) {
1829b086 2016 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2017 if (unlikely(!pagep)) {
ac124ff9 2018 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2019 break;
2020 }
ba42fad0
IV
2021 page_dmaaddr = dma_map_page(dev, pagep, 0,
2022 adapter->big_page_size,
2b7bcebf 2023 DMA_FROM_DEVICE);
ba42fad0
IV
2024 if (dma_mapping_error(dev, page_dmaaddr)) {
2025 put_page(pagep);
2026 pagep = NULL;
d3de1540 2027 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2028 break;
2029 }
e50287be 2030 page_offset = 0;
6b7c5b94
SP
2031 } else {
2032 get_page(pagep);
e50287be 2033 page_offset += rx_frag_size;
6b7c5b94 2034 }
e50287be 2035 page_info->page_offset = page_offset;
6b7c5b94 2036 page_info->page = pagep;
6b7c5b94
SP
2037
2038 rxd = queue_head_node(rxq);
e50287be 2039 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2040 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2041 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2042
2043 /* Any space left in the current big page for another frag? */
2044 if ((page_offset + rx_frag_size + rx_frag_size) >
2045 adapter->big_page_size) {
2046 pagep = NULL;
e50287be
SP
2047 page_info->last_frag = true;
2048 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2049 } else {
2050 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2051 }
26d92f92
SP
2052
2053 prev_page_info = page_info;
2054 queue_head_inc(rxq);
10ef9ab4 2055 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2056 }
e50287be
SP
2057
2058 /* Mark the last frag of a page when we break out of the above loop
2059 * with no more slots available in the RXQ
2060 */
2061 if (pagep) {
2062 prev_page_info->last_frag = true;
2063 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2064 }
6b7c5b94
SP
2065
2066 if (posted) {
6b7c5b94 2067 atomic_add(posted, &rxq->used);
6384a4d0
SP
2068 if (rxo->rx_post_starved)
2069 rxo->rx_post_starved = false;
c30d7266
AK
2070 do {
2071 notify = min(256u, posted);
2072 be_rxq_notify(adapter, rxq->id, notify);
2073 posted -= notify;
2074 } while (posted);
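 /* The doorbell's posted-count field seemingly holds at most 256
  * new descriptors per write, hence the min(256u, posted) loop.
  */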
ea1dae11
SP
2075 } else if (atomic_read(&rxq->used) == 0) {
2076 /* Let be_worker replenish when memory is available */
3abcdeda 2077 rxo->rx_post_starved = true;
6b7c5b94 2078 }
6b7c5b94
SP
2079}
2080
152ffe5b 2081static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2082{
152ffe5b
SB
2083 struct be_queue_info *tx_cq = &txo->cq;
2084 struct be_tx_compl_info *txcp = &txo->txcp;
2085 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2086
152ffe5b 2087 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2088 return NULL;
2089
152ffe5b 2090 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2091 rmb();
152ffe5b 2092 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2093
152ffe5b
SB
2094 txcp->status = GET_TX_COMPL_BITS(status, compl);
2095 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2096
152ffe5b 2097 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2098 queue_tail_inc(tx_cq);
2099 return txcp;
2100}
2101
3c8def97 2102static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2103 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2104{
5f07b3c5 2105 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2106 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2107 u16 frag_index, num_wrbs = 0;
2108 struct sk_buff *skb = NULL;
2109 bool unmap_skb_hdr = false;
a73b796e 2110 struct be_eth_wrb *wrb;
6b7c5b94 2111
ec43b1a6 2112 do {
5f07b3c5
SP
2113 if (sent_skbs[txq->tail]) {
2114 /* Free skb from prev req */
2115 if (skb)
2116 dev_consume_skb_any(skb);
2117 skb = sent_skbs[txq->tail];
2118 sent_skbs[txq->tail] = NULL;
2119 queue_tail_inc(txq); /* skip hdr wrb */
2120 num_wrbs++;
2121 unmap_skb_hdr = true;
2122 }
a73b796e 2123 wrb = queue_tail_node(txq);
5f07b3c5 2124 frag_index = txq->tail;
2b7bcebf 2125 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2126 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2127 unmap_skb_hdr = false;
6b7c5b94 2128 queue_tail_inc(txq);
5f07b3c5
SP
2129 num_wrbs++;
2130 } while (frag_index != last_index);
2131 dev_consume_skb_any(skb);
6b7c5b94 2132
4d586b82 2133 return num_wrbs;
6b7c5b94
SP
2134}
2135
10ef9ab4
SP
2136/* Return the number of events in the event queue */
2137static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2138{
10ef9ab4
SP
2139 struct be_eq_entry *eqe;
2140 int num = 0;
859b1e4e 2141
10ef9ab4
SP
2142 do {
2143 eqe = queue_tail_node(&eqo->q);
2144 if (eqe->evt == 0)
2145 break;
859b1e4e 2146
10ef9ab4
SP
2147 rmb();
2148 eqe->evt = 0;
2149 num++;
2150 queue_tail_inc(&eqo->q);
2151 } while (true);
2152
2153 return num;
859b1e4e
SP
2154}
2155
10ef9ab4
SP
 2156/* Leaves the EQ in disarmed state */
2157static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2158{
10ef9ab4 2159 int num = events_get(eqo);
859b1e4e 2160
10ef9ab4 2161 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2162}
2163
10ef9ab4 2164static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2165{
2166 struct be_rx_page_info *page_info;
3abcdeda
SP
2167 struct be_queue_info *rxq = &rxo->q;
2168 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2169 struct be_rx_compl_info *rxcp;
d23e946c
SP
2170 struct be_adapter *adapter = rxo->adapter;
2171 int flush_wait = 0;
6b7c5b94 2172
d23e946c
SP
2173 /* Consume pending rx completions.
2174 * Wait for the flush completion (identified by zero num_rcvd)
2175 * to arrive. Notify CQ even when there are no more CQ entries
2176 * for HW to flush partially coalesced CQ entries.
2177 * In Lancer, there is no need to wait for flush compl.
2178 */
2179 for (;;) {
2180 rxcp = be_rx_compl_get(rxo);
ddf1169f 2181 if (!rxcp) {
d23e946c
SP
2182 if (lancer_chip(adapter))
2183 break;
2184
2185 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2186 dev_warn(&adapter->pdev->dev,
2187 "did not receive flush compl\n");
2188 break;
2189 }
2190 be_cq_notify(adapter, rx_cq->id, true, 0);
2191 mdelay(1);
2192 } else {
2193 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2194 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2195 if (rxcp->num_rcvd == 0)
2196 break;
2197 }
6b7c5b94
SP
2198 }
2199
d23e946c
SP
2200 /* After cleanup, leave the CQ in unarmed state */
2201 be_cq_notify(adapter, rx_cq->id, false, 0);
2202
2203 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2204 while (atomic_read(&rxq->used) > 0) {
2205 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2206 put_page(page_info->page);
2207 memset(page_info, 0, sizeof(*page_info));
2208 }
2209 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2210 rxq->tail = 0;
2211 rxq->head = 0;
6b7c5b94
SP
2212}
2213
0ae57bb3 2214static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2215{
5f07b3c5
SP
2216 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2217 struct device *dev = &adapter->pdev->dev;
152ffe5b 2218 struct be_tx_compl_info *txcp;
0ae57bb3 2219 struct be_queue_info *txq;
152ffe5b 2220 struct be_tx_obj *txo;
0ae57bb3 2221 int i, pending_txqs;
a8e9179a 2222
1a3d0717 2223 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2224 do {
0ae57bb3
SP
2225 pending_txqs = adapter->num_tx_qs;
2226
2227 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2228 cmpl = 0;
2229 num_wrbs = 0;
0ae57bb3 2230 txq = &txo->q;
152ffe5b
SB
2231 while ((txcp = be_tx_compl_get(txo))) {
2232 num_wrbs +=
2233 be_tx_compl_process(adapter, txo,
2234 txcp->end_index);
0ae57bb3
SP
2235 cmpl++;
2236 }
2237 if (cmpl) {
2238 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2239 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2240 timeo = 0;
0ae57bb3 2241 }
cf5671e6 2242 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2243 pending_txqs--;
a8e9179a
SP
2244 }
2245
1a3d0717 2246 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2247 break;
2248
2249 mdelay(1);
2250 } while (true);
2251
5f07b3c5 2252 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2253 for_all_tx_queues(adapter, txo, i) {
2254 txq = &txo->q;
0ae57bb3 2255
5f07b3c5
SP
2256 if (atomic_read(&txq->used)) {
2257 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2258 i, atomic_read(&txq->used));
2259 notified_idx = txq->tail;
0ae57bb3 2260 end_idx = txq->tail;
5f07b3c5
SP
2261 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2262 txq->len);
2263 /* Use the tx-compl process logic to handle requests
2264 * that were not sent to the HW.
2265 */
0ae57bb3
SP
2266 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2267 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2268 BUG_ON(atomic_read(&txq->used));
2269 txo->pend_wrb_cnt = 0;
2270 /* Since hw was never notified of these requests,
2271 * reset TXQ indices
2272 */
2273 txq->head = notified_idx;
2274 txq->tail = notified_idx;
0ae57bb3 2275 }
b03388d6 2276 }
6b7c5b94
SP
2277}
2278
10ef9ab4
SP
2279static void be_evt_queues_destroy(struct be_adapter *adapter)
2280{
2281 struct be_eq_obj *eqo;
2282 int i;
2283
2284 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2285 if (eqo->q.created) {
2286 be_eq_clean(eqo);
10ef9ab4 2287 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2288 napi_hash_del(&eqo->napi);
68d7bdcb 2289 netif_napi_del(&eqo->napi);
19d59aa7 2290 }
10ef9ab4
SP
2291 be_queue_free(adapter, &eqo->q);
2292 }
2293}
2294
2295static int be_evt_queues_create(struct be_adapter *adapter)
2296{
2297 struct be_queue_info *eq;
2298 struct be_eq_obj *eqo;
2632bafd 2299 struct be_aic_obj *aic;
10ef9ab4
SP
2300 int i, rc;
2301
92bf14ab
SP
2302 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2303 adapter->cfg_num_qs);
10ef9ab4
SP
2304
2305 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2306 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2307 BE_NAPI_WEIGHT);
6384a4d0 2308 napi_hash_add(&eqo->napi);
2632bafd 2309 aic = &adapter->aic_obj[i];
10ef9ab4 2310 eqo->adapter = adapter;
10ef9ab4 2311 eqo->idx = i;
2632bafd
SP
2312 aic->max_eqd = BE_MAX_EQD;
2313 aic->enable = true;
10ef9ab4
SP
2314
2315 eq = &eqo->q;
2316 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2317 sizeof(struct be_eq_entry));
10ef9ab4
SP
2318 if (rc)
2319 return rc;
2320
f2f781a7 2321 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2322 if (rc)
2323 return rc;
2324 }
1cfafab9 2325 return 0;
10ef9ab4
SP
2326}
2327
5fb379ee
SP
2328static void be_mcc_queues_destroy(struct be_adapter *adapter)
2329{
2330 struct be_queue_info *q;
5fb379ee 2331
8788fdc2 2332 q = &adapter->mcc_obj.q;
5fb379ee 2333 if (q->created)
8788fdc2 2334 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2335 be_queue_free(adapter, q);
2336
8788fdc2 2337 q = &adapter->mcc_obj.cq;
5fb379ee 2338 if (q->created)
8788fdc2 2339 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2340 be_queue_free(adapter, q);
2341}
2342
2343/* Must be called only after TX qs are created as MCC shares TX EQ */
2344static int be_mcc_queues_create(struct be_adapter *adapter)
2345{
2346 struct be_queue_info *q, *cq;
5fb379ee 2347
8788fdc2 2348 cq = &adapter->mcc_obj.cq;
5fb379ee 2349 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2350 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2351 goto err;
2352
10ef9ab4
SP
2353 /* Use the default EQ for MCC completions */
2354 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2355 goto mcc_cq_free;
2356
8788fdc2 2357 q = &adapter->mcc_obj.q;
5fb379ee
SP
2358 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2359 goto mcc_cq_destroy;
2360
8788fdc2 2361 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2362 goto mcc_q_free;
2363
2364 return 0;
2365
2366mcc_q_free:
2367 be_queue_free(adapter, q);
2368mcc_cq_destroy:
8788fdc2 2369 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2370mcc_cq_free:
2371 be_queue_free(adapter, cq);
2372err:
2373 return -1;
2374}
2375
6b7c5b94
SP
2376static void be_tx_queues_destroy(struct be_adapter *adapter)
2377{
2378 struct be_queue_info *q;
3c8def97
SP
2379 struct be_tx_obj *txo;
2380 u8 i;
6b7c5b94 2381
3c8def97
SP
2382 for_all_tx_queues(adapter, txo, i) {
2383 q = &txo->q;
2384 if (q->created)
2385 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2386 be_queue_free(adapter, q);
6b7c5b94 2387
3c8def97
SP
2388 q = &txo->cq;
2389 if (q->created)
2390 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2391 be_queue_free(adapter, q);
2392 }
6b7c5b94
SP
2393}
2394
7707133c 2395static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2396{
10ef9ab4 2397 struct be_queue_info *cq, *eq;
3c8def97 2398 struct be_tx_obj *txo;
92bf14ab 2399 int status, i;
6b7c5b94 2400
92bf14ab 2401 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2402
10ef9ab4
SP
2403 for_all_tx_queues(adapter, txo, i) {
2404 cq = &txo->cq;
2405 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2406 sizeof(struct be_eth_tx_compl));
2407 if (status)
2408 return status;
3c8def97 2409
827da44c
JS
2410 u64_stats_init(&txo->stats.sync);
2411 u64_stats_init(&txo->stats.sync_compl);
2412
10ef9ab4
SP
2413 /* If num_evt_qs is less than num_tx_qs, then more than
 2414 * one TXQ shares an EQ
2415 */
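 /* e.g. 8 TX queues over 4 EQs: TXQ i is bound to EQ (i % 4) */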
2416 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2417 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2418 if (status)
2419 return status;
6b7c5b94 2420
10ef9ab4
SP
2421 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2422 sizeof(struct be_eth_wrb));
2423 if (status)
2424 return status;
6b7c5b94 2425
94d73aaa 2426 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2427 if (status)
2428 return status;
3c8def97 2429 }
6b7c5b94 2430
d379142b
SP
2431 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2432 adapter->num_tx_qs);
10ef9ab4 2433 return 0;
6b7c5b94
SP
2434}
2435
10ef9ab4 2436static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2437{
2438 struct be_queue_info *q;
3abcdeda
SP
2439 struct be_rx_obj *rxo;
2440 int i;
2441
2442 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2443 q = &rxo->cq;
2444 if (q->created)
2445 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2446 be_queue_free(adapter, q);
ac6a0c4a
SP
2447 }
2448}
2449
10ef9ab4 2450static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2451{
10ef9ab4 2452 struct be_queue_info *eq, *cq;
3abcdeda
SP
2453 struct be_rx_obj *rxo;
2454 int rc, i;
6b7c5b94 2455
92bf14ab 2456 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2457 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2458
71bb8bd0
VV
 2459 /* We'll use RSS only if at least 2 RSS rings are supported. */
2460 if (adapter->num_rss_qs <= 1)
2461 adapter->num_rss_qs = 0;
2462
2463 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2464
2465 /* When the interface is not capable of RSS rings (and there is no
2466 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2467 */
71bb8bd0
VV
2468 if (adapter->num_rx_qs == 0)
2469 adapter->num_rx_qs = 1;
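 /* e.g. 4 EQs with a default RXQ needed gives num_rss_qs = 4 and
  * num_rx_qs = 5; with a single EQ, RSS is disabled and one lone
  * RXQ is used.
  */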
92bf14ab 2470
6b7c5b94 2471 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2472 for_all_rx_queues(adapter, rxo, i) {
2473 rxo->adapter = adapter;
3abcdeda
SP
2474 cq = &rxo->cq;
2475 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2476 sizeof(struct be_eth_rx_compl));
3abcdeda 2477 if (rc)
10ef9ab4 2478 return rc;
3abcdeda 2479
827da44c 2480 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2481 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2482 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2483 if (rc)
10ef9ab4 2484 return rc;
3abcdeda 2485 }
6b7c5b94 2486
d379142b 2487 dev_info(&adapter->pdev->dev,
71bb8bd0 2488 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2489 return 0;
b628bde2
SP
2490}
2491
6b7c5b94
SP
2492static irqreturn_t be_intx(int irq, void *dev)
2493{
e49cc34f
SP
2494 struct be_eq_obj *eqo = dev;
2495 struct be_adapter *adapter = eqo->adapter;
2496 int num_evts = 0;
6b7c5b94 2497
d0b9cec3
SP
2498 /* IRQ is not expected when NAPI is scheduled as the EQ
2499 * will not be armed.
2500 * But, this can happen on Lancer INTx where it takes
 2501 * a while to de-assert INTx or in BE2 where occasionally
2502 * an interrupt may be raised even when EQ is unarmed.
2503 * If NAPI is already scheduled, then counting & notifying
2504 * events will orphan them.
e49cc34f 2505 */
d0b9cec3 2506 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2507 num_evts = events_get(eqo);
d0b9cec3
SP
2508 __napi_schedule(&eqo->napi);
2509 if (num_evts)
2510 eqo->spurious_intr = 0;
2511 }
2512 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2513
d0b9cec3
SP
 2514 /* Return IRQ_HANDLED only for the first spurious intr
2515 * after a valid intr to stop the kernel from branding
2516 * this irq as a bad one!
e49cc34f 2517 */
d0b9cec3
SP
2518 if (num_evts || eqo->spurious_intr++ == 0)
2519 return IRQ_HANDLED;
2520 else
2521 return IRQ_NONE;
6b7c5b94
SP
2522}
2523
10ef9ab4 2524static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2525{
10ef9ab4 2526 struct be_eq_obj *eqo = dev;
6b7c5b94 2527
0b545a62
SP
2528 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2529 napi_schedule(&eqo->napi);
6b7c5b94
SP
2530 return IRQ_HANDLED;
2531}
2532
2e588f84 2533static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2534{
e38b1706 2535 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2536}
2537
10ef9ab4 2538static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2539 int budget, int polling)
6b7c5b94 2540{
3abcdeda
SP
2541 struct be_adapter *adapter = rxo->adapter;
2542 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2543 struct be_rx_compl_info *rxcp;
6b7c5b94 2544 u32 work_done;
c30d7266 2545 u32 frags_consumed = 0;
6b7c5b94
SP
2546
2547 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2548 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2549 if (!rxcp)
2550 break;
2551
12004ae9
SP
 2552 /* Is it a flush compl that has no data? */
2553 if (unlikely(rxcp->num_rcvd == 0))
2554 goto loop_continue;
2555
2556 /* Discard compl with partial DMA Lancer B0 */
2557 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2558 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2559 goto loop_continue;
2560 }
2561
2562 /* On BE drop pkts that arrive due to imperfect filtering in
 2563 * promiscuous mode on some SKUs
2564 */
2565 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2566 !lancer_chip(adapter))) {
10ef9ab4 2567 be_rx_compl_discard(rxo, rxcp);
12004ae9 2568 goto loop_continue;
64642811 2569 }
009dd872 2570
6384a4d0
SP
2571 /* Don't do gro when we're busy_polling */
2572 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2573 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2574 else
6384a4d0
SP
2575 be_rx_compl_process(rxo, napi, rxcp);
2576
12004ae9 2577loop_continue:
c30d7266 2578 frags_consumed += rxcp->num_rcvd;
2e588f84 2579 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2580 }
2581
10ef9ab4
SP
2582 if (work_done) {
2583 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2584
6384a4d0
SP
2585 /* When an rx-obj gets into post_starved state, just
2586 * let be_worker do the posting.
2587 */
2588 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2589 !rxo->rx_post_starved)
c30d7266
AK
2590 be_post_rx_frags(rxo, GFP_ATOMIC,
2591 max_t(u32, MAX_RX_POST,
2592 frags_consumed));
6b7c5b94 2593 }
10ef9ab4 2594
6b7c5b94
SP
2595 return work_done;
2596}
2597
152ffe5b 2598static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2599{
2600 switch (status) {
2601 case BE_TX_COMP_HDR_PARSE_ERR:
2602 tx_stats(txo)->tx_hdr_parse_err++;
2603 break;
2604 case BE_TX_COMP_NDMA_ERR:
2605 tx_stats(txo)->tx_dma_err++;
2606 break;
2607 case BE_TX_COMP_ACL_ERR:
2608 tx_stats(txo)->tx_spoof_check_err++;
2609 break;
2610 }
2611}
2612
152ffe5b 2613static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2614{
2615 switch (status) {
2616 case LANCER_TX_COMP_LSO_ERR:
2617 tx_stats(txo)->tx_tso_err++;
2618 break;
2619 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2620 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2621 tx_stats(txo)->tx_spoof_check_err++;
2622 break;
2623 case LANCER_TX_COMP_QINQ_ERR:
2624 tx_stats(txo)->tx_qinq_err++;
2625 break;
2626 case LANCER_TX_COMP_PARITY_ERR:
2627 tx_stats(txo)->tx_internal_parity_err++;
2628 break;
2629 case LANCER_TX_COMP_DMA_ERR:
2630 tx_stats(txo)->tx_dma_err++;
2631 break;
2632 }
2633}
2634
c8f64615
SP
2635static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2636 int idx)
6b7c5b94 2637{
c8f64615 2638 int num_wrbs = 0, work_done = 0;
152ffe5b 2639 struct be_tx_compl_info *txcp;
c8f64615 2640
152ffe5b
SB
2641 while ((txcp = be_tx_compl_get(txo))) {
2642 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2643 work_done++;
3c8def97 2644
152ffe5b 2645 if (txcp->status) {
512bb8a2 2646 if (lancer_chip(adapter))
152ffe5b 2647 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2648 else
152ffe5b 2649 be_update_tx_err(txo, txcp->status);
512bb8a2 2650 }
10ef9ab4 2651 }
6b7c5b94 2652
10ef9ab4
SP
2653 if (work_done) {
2654 be_cq_notify(adapter, txo->cq.id, true, work_done);
2655 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2656
10ef9ab4
SP
2657 /* As Tx wrbs have been freed up, wake up netdev queue
2658 * if it was stopped due to lack of tx wrbs. */
2659 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2660 be_can_txq_wake(txo)) {
10ef9ab4 2661 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2662 }
10ef9ab4
SP
2663
2664 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2665 tx_stats(txo)->tx_compl += work_done;
2666 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2667 }
10ef9ab4 2668}
6b7c5b94 2669
f7062ee5
SP
2670#ifdef CONFIG_NET_RX_BUSY_POLL
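/* NAPI and busy-poll contexts exclude each other via eqo->lock and
 * eqo->state: the winner sets BE_EQ_NAPI or BE_EQ_POLL; a loser
 * records the matching *_YIELD flag and backs off instead of
 * spinning on the lock.
 */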
2671static inline bool be_lock_napi(struct be_eq_obj *eqo)
2672{
2673 bool status = true;
2674
2675 spin_lock(&eqo->lock); /* BH is already disabled */
2676 if (eqo->state & BE_EQ_LOCKED) {
2677 WARN_ON(eqo->state & BE_EQ_NAPI);
2678 eqo->state |= BE_EQ_NAPI_YIELD;
2679 status = false;
2680 } else {
2681 eqo->state = BE_EQ_NAPI;
2682 }
2683 spin_unlock(&eqo->lock);
2684 return status;
2685}
2686
2687static inline void be_unlock_napi(struct be_eq_obj *eqo)
2688{
2689 spin_lock(&eqo->lock); /* BH is already disabled */
2690
2691 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2692 eqo->state = BE_EQ_IDLE;
2693
2694 spin_unlock(&eqo->lock);
2695}
2696
2697static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2698{
2699 bool status = true;
2700
2701 spin_lock_bh(&eqo->lock);
2702 if (eqo->state & BE_EQ_LOCKED) {
2703 eqo->state |= BE_EQ_POLL_YIELD;
2704 status = false;
2705 } else {
2706 eqo->state |= BE_EQ_POLL;
2707 }
2708 spin_unlock_bh(&eqo->lock);
2709 return status;
2710}
2711
2712static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2713{
2714 spin_lock_bh(&eqo->lock);
2715
2716 WARN_ON(eqo->state & (BE_EQ_NAPI));
2717 eqo->state = BE_EQ_IDLE;
2718
2719 spin_unlock_bh(&eqo->lock);
2720}
2721
2722static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2723{
2724 spin_lock_init(&eqo->lock);
2725 eqo->state = BE_EQ_IDLE;
2726}
2727
2728static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2729{
2730 local_bh_disable();
2731
2732 /* It's enough to just acquire napi lock on the eqo to stop
 2733 * be_busy_poll() from processing any queues.
2734 */
2735 while (!be_lock_napi(eqo))
2736 mdelay(1);
2737
2738 local_bh_enable();
2739}
2740
2741#else /* CONFIG_NET_RX_BUSY_POLL */
2742
2743static inline bool be_lock_napi(struct be_eq_obj *eqo)
2744{
2745 return true;
2746}
2747
2748static inline void be_unlock_napi(struct be_eq_obj *eqo)
2749{
2750}
2751
2752static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2753{
2754 return false;
2755}
2756
2757static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2758{
2759}
2760
2761static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2762{
2763}
2764
2765static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2766{
2767}
2768#endif /* CONFIG_NET_RX_BUSY_POLL */
2769
68d7bdcb 2770int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2771{
2772 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2773 struct be_adapter *adapter = eqo->adapter;
0b545a62 2774 int max_work = 0, work, i, num_evts;
6384a4d0 2775 struct be_rx_obj *rxo;
a4906ea0 2776 struct be_tx_obj *txo;
f31e50a8 2777
0b545a62
SP
2778 num_evts = events_get(eqo);
2779
a4906ea0
SP
2780 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2781 be_process_tx(adapter, txo, i);
f31e50a8 2782
6384a4d0
SP
2783 if (be_lock_napi(eqo)) {
2784 /* This loop will iterate twice for EQ0 in which
 2785 * completions of the last RXQ (default one) are also processed.
2786 * For other EQs the loop iterates only once
2787 */
2788 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2789 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2790 max_work = max(work, max_work);
2791 }
2792 be_unlock_napi(eqo);
2793 } else {
2794 max_work = budget;
10ef9ab4 2795 }
6b7c5b94 2796
10ef9ab4
SP
2797 if (is_mcc_eqo(eqo))
2798 be_process_mcc(adapter);
93c86700 2799
10ef9ab4
SP
2800 if (max_work < budget) {
2801 napi_complete(napi);
0b545a62 2802 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2803 } else {
2804 /* As we'll continue in polling mode, count and clear events */
0b545a62 2805 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2806 }
10ef9ab4 2807 return max_work;
6b7c5b94
SP
2808}
2809
6384a4d0
SP
2810#ifdef CONFIG_NET_RX_BUSY_POLL
2811static int be_busy_poll(struct napi_struct *napi)
2812{
2813 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2814 struct be_adapter *adapter = eqo->adapter;
2815 struct be_rx_obj *rxo;
2816 int i, work = 0;
2817
2818 if (!be_lock_busy_poll(eqo))
2819 return LL_FLUSH_BUSY;
2820
2821 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2822 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2823 if (work)
2824 break;
2825 }
2826
2827 be_unlock_busy_poll(eqo);
2828 return work;
2829}
2830#endif
2831
f67ef7ba 2832void be_detect_error(struct be_adapter *adapter)
7c185276 2833{
e1cfb67a
PR
2834 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2835 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2836 u32 i;
eb0eecc1
SK
2837 bool error_detected = false;
2838 struct device *dev = &adapter->pdev->dev;
2839 struct net_device *netdev = adapter->netdev;
7c185276 2840
d23e946c 2841 if (be_hw_error(adapter))
72f02485
SP
2842 return;
2843
e1cfb67a
PR
2844 if (lancer_chip(adapter)) {
2845 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2846 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2847 sliport_err1 = ioread32(adapter->db +
748b539a 2848 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2849 sliport_err2 = ioread32(adapter->db +
748b539a 2850 SLIPORT_ERROR2_OFFSET);
eb0eecc1 2851 adapter->hw_error = true;
d0e1b319 2852 error_detected = true;
eb0eecc1
SK
 2853 /* Do not log error messages if it's a FW reset */
2854 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2855 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2856 dev_info(dev, "Firmware update in progress\n");
2857 } else {
eb0eecc1
SK
2858 dev_err(dev, "Error detected in the card\n");
2859 dev_err(dev, "ERR: sliport status 0x%x\n",
2860 sliport_status);
2861 dev_err(dev, "ERR: sliport error1 0x%x\n",
2862 sliport_err1);
2863 dev_err(dev, "ERR: sliport error2 0x%x\n",
2864 sliport_err2);
2865 }
e1cfb67a
PR
2866 }
2867 } else {
2868 pci_read_config_dword(adapter->pdev,
748b539a 2869 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2870 pci_read_config_dword(adapter->pdev,
748b539a 2871 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2872 pci_read_config_dword(adapter->pdev,
748b539a 2873 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2874 pci_read_config_dword(adapter->pdev,
748b539a 2875 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2876
f67ef7ba
PR
2877 ue_lo = (ue_lo & ~ue_lo_mask);
2878 ue_hi = (ue_hi & ~ue_hi_mask);
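 /* Bits set in the UE mask registers are expected and ignorable;
  * only the unmasked bits that survive the step above indicate a
  * genuine unrecoverable error.
  */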
7c185276 2879
eb0eecc1
SK
 2880 /* On certain platforms BE hardware can indicate spurious UEs.
 2881 * Let the HW itself stop completely on a real UE; hence hw_error
 2882 * is not set merely on UE detection.
2883 */
f67ef7ba 2884
eb0eecc1
SK
2885 if (ue_lo || ue_hi) {
2886 error_detected = true;
2887 dev_err(dev,
2888 "Unrecoverable Error detected in the adapter");
2889 dev_err(dev, "Please reboot server to recover");
2890 if (skyhawk_chip(adapter))
2891 adapter->hw_error = true;
2892 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2893 if (ue_lo & 1)
2894 dev_err(dev, "UE: %s bit set\n",
2895 ue_status_low_desc[i]);
2896 }
2897 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2898 if (ue_hi & 1)
2899 dev_err(dev, "UE: %s bit set\n",
2900 ue_status_hi_desc[i]);
2901 }
7c185276
AK
2902 }
2903 }
eb0eecc1
SK
2904 if (error_detected)
2905 netif_carrier_off(netdev);
7c185276
AK
2906}
2907
8d56ff11
SP
2908static void be_msix_disable(struct be_adapter *adapter)
2909{
ac6a0c4a 2910 if (msix_enabled(adapter)) {
8d56ff11 2911 pci_disable_msix(adapter->pdev);
ac6a0c4a 2912 adapter->num_msix_vec = 0;
68d7bdcb 2913 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2914 }
2915}
2916
c2bba3df 2917static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2918{
7dc4c064 2919 int i, num_vec;
d379142b 2920 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2921
92bf14ab
SP
2922 /* If RoCE is supported, program the max number of NIC vectors that
2923 * may be configured via set-channels, along with vectors needed for
 2924 * RoCE. Else, just program the number we'll use initially.
2925 */
2926 if (be_roce_supported(adapter))
2927 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2928 2 * num_online_cpus());
2929 else
2930 num_vec = adapter->cfg_num_qs;
3abcdeda 2931
ac6a0c4a 2932 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2933 adapter->msix_entries[i].entry = i;
2934
7dc4c064
AG
2935 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2936 MIN_MSIX_VECTORS, num_vec);
2937 if (num_vec < 0)
2938 goto fail;
92bf14ab 2939
92bf14ab
SP
2940 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2941 adapter->num_msix_roce_vec = num_vec / 2;
2942 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2943 adapter->num_msix_roce_vec);
2944 }
2945
2946 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2947
2948 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2949 adapter->num_msix_vec);
c2bba3df 2950 return 0;
7dc4c064
AG
2951
2952fail:
2953 dev_warn(dev, "MSIx enable failed\n");
2954
2955 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2956 if (!be_physfn(adapter))
2957 return num_vec;
2958 return 0;
6b7c5b94
SP
2959}
2960
fe6d2a38 2961static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2962 struct be_eq_obj *eqo)
b628bde2 2963{
f2f781a7 2964 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2965}
6b7c5b94 2966
b628bde2
SP
2967static int be_msix_register(struct be_adapter *adapter)
2968{
10ef9ab4
SP
2969 struct net_device *netdev = adapter->netdev;
2970 struct be_eq_obj *eqo;
2971 int status, i, vec;
6b7c5b94 2972
10ef9ab4
SP
2973 for_all_evt_queues(adapter, eqo, i) {
2974 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2975 vec = be_msix_vec_get(adapter, eqo);
2976 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2977 if (status)
2978 goto err_msix;
2979 }
b628bde2 2980
6b7c5b94 2981 return 0;
3abcdeda 2982err_msix:
10ef9ab4
SP
2983 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2984 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2985 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2986 status);
ac6a0c4a 2987 be_msix_disable(adapter);
6b7c5b94
SP
2988 return status;
2989}
2990
2991static int be_irq_register(struct be_adapter *adapter)
2992{
2993 struct net_device *netdev = adapter->netdev;
2994 int status;
2995
ac6a0c4a 2996 if (msix_enabled(adapter)) {
6b7c5b94
SP
2997 status = be_msix_register(adapter);
2998 if (status == 0)
2999 goto done;
ba343c77
SB
3000 /* INTx is not supported for VF */
3001 if (!be_physfn(adapter))
3002 return status;
6b7c5b94
SP
3003 }
3004
e49cc34f 3005 /* INTx: only the first EQ is used */
6b7c5b94
SP
3006 netdev->irq = adapter->pdev->irq;
3007 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3008 &adapter->eq_obj[0]);
6b7c5b94
SP
3009 if (status) {
3010 dev_err(&adapter->pdev->dev,
3011 "INTx request IRQ failed - err %d\n", status);
3012 return status;
3013 }
3014done:
3015 adapter->isr_registered = true;
3016 return 0;
3017}
3018
3019static void be_irq_unregister(struct be_adapter *adapter)
3020{
3021 struct net_device *netdev = adapter->netdev;
10ef9ab4 3022 struct be_eq_obj *eqo;
3abcdeda 3023 int i;
6b7c5b94
SP
3024
3025 if (!adapter->isr_registered)
3026 return;
3027
3028 /* INTx */
ac6a0c4a 3029 if (!msix_enabled(adapter)) {
e49cc34f 3030 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3031 goto done;
3032 }
3033
3034 /* MSIx */
10ef9ab4
SP
3035 for_all_evt_queues(adapter, eqo, i)
3036 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 3037
6b7c5b94
SP
3038done:
3039 adapter->isr_registered = false;
6b7c5b94
SP
3040}
3041
10ef9ab4 3042static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3043{
3044 struct be_queue_info *q;
3045 struct be_rx_obj *rxo;
3046 int i;
3047
3048 for_all_rx_queues(adapter, rxo, i) {
3049 q = &rxo->q;
3050 if (q->created) {
3051 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3052 be_rx_cq_clean(rxo);
482c9e79 3053 }
10ef9ab4 3054 be_queue_free(adapter, q);
482c9e79
SP
3055 }
3056}
3057
889cd4b2
SP
3058static int be_close(struct net_device *netdev)
3059{
3060 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3061 struct be_eq_obj *eqo;
3062 int i;
889cd4b2 3063
e1ad8e33
KA
3064 /* This protection is needed as be_close() may be called even when the
3065 * adapter is in cleared state (after eeh perm failure)
3066 */
3067 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3068 return 0;
3069
045508a8
PP
3070 be_roce_dev_close(adapter);
3071
dff345c5
IV
3072 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3073 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3074 napi_disable(&eqo->napi);
6384a4d0
SP
3075 be_disable_busy_poll(eqo);
3076 }
71237b6f 3077 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3078 }
a323d9bf
SP
3079
3080 be_async_mcc_disable(adapter);
3081
3082 /* Wait for all pending tx completions to arrive so that
3083 * all tx skbs are freed.
3084 */
fba87559 3085 netif_tx_disable(netdev);
6e1f9975 3086 be_tx_compl_clean(adapter);
a323d9bf
SP
3087
3088 be_rx_qs_destroy(adapter);
f66b7cfd 3089 be_clear_uc_list(adapter);
d11a347d 3090
a323d9bf 3091 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3092 if (msix_enabled(adapter))
3093 synchronize_irq(be_msix_vec_get(adapter, eqo));
3094 else
3095 synchronize_irq(netdev->irq);
3096 be_eq_clean(eqo);
63fcb27f
PR
3097 }
3098
889cd4b2
SP
3099 be_irq_unregister(adapter);
3100
482c9e79
SP
3101 return 0;
3102}
3103
10ef9ab4 3104static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3105{
1dcf7b1c
ED
3106 struct rss_info *rss = &adapter->rss_info;
3107 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3108 struct be_rx_obj *rxo;
e9008ee9 3109 int rc, i, j;
482c9e79
SP
3110
3111 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3112 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3113 sizeof(struct be_eth_rx_d));
3114 if (rc)
3115 return rc;
3116 }
3117
71bb8bd0
VV
3118 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3119 rxo = default_rxo(adapter);
3120 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3121 rx_frag_size, adapter->if_handle,
3122 false, &rxo->rss_id);
3123 if (rc)
3124 return rc;
3125 }
10ef9ab4
SP
3126
3127 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3128 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3129 rx_frag_size, adapter->if_handle,
3130 true, &rxo->rss_id);
482c9e79
SP
3131 if (rc)
3132 return rc;
3133 }
3134
3135 if (be_multi_rxq(adapter)) {
71bb8bd0 3136 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3137 for_all_rss_queues(adapter, rxo, i) {
e2557877 3138 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3139 break;
e2557877
VD
3140 rss->rsstable[j + i] = rxo->rss_id;
3141 rss->rss_queue[j + i] = i;
e9008ee9
PR
3142 }
3143 }
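 /* The 128-entry indirection table just repeats the ring IDs: with
  * 4 RSS rings it holds the pattern r0 r1 r2 r3 thirty-two times,
  * spreading flows evenly across the rings.
  */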
e2557877
VD
3144 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3145 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3146
3147 if (!BEx_chip(adapter))
e2557877
VD
3148 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3149 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3150 } else {
3151 /* Disable RSS, if only default RX Q is created */
e2557877 3152 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3153 }
594ad54a 3154
1dcf7b1c 3155 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3156 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3157 128, rss_key);
da1388d6 3158 if (rc) {
e2557877 3159 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3160 return rc;
482c9e79
SP
3161 }
3162
1dcf7b1c 3163 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3164
482c9e79 3165 /* First time posting */
10ef9ab4 3166 for_all_rx_queues(adapter, rxo, i)
c30d7266 3167 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3168 return 0;
3169}
3170
6b7c5b94
SP
3171static int be_open(struct net_device *netdev)
3172{
3173 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3174 struct be_eq_obj *eqo;
3abcdeda 3175 struct be_rx_obj *rxo;
10ef9ab4 3176 struct be_tx_obj *txo;
b236916a 3177 u8 link_status;
3abcdeda 3178 int status, i;
5fb379ee 3179
10ef9ab4 3180 status = be_rx_qs_create(adapter);
482c9e79
SP
3181 if (status)
3182 goto err;
3183
c2bba3df
SK
3184 status = be_irq_register(adapter);
3185 if (status)
3186 goto err;
5fb379ee 3187
10ef9ab4 3188 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3189 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3190
10ef9ab4
SP
3191 for_all_tx_queues(adapter, txo, i)
3192 be_cq_notify(adapter, txo->cq.id, true, 0);
3193
7a1e9b20
SP
3194 be_async_mcc_enable(adapter);
3195
10ef9ab4
SP
3196 for_all_evt_queues(adapter, eqo, i) {
3197 napi_enable(&eqo->napi);
6384a4d0 3198 be_enable_busy_poll(eqo);
4cad9f3b 3199 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3200 }
04d3d624 3201 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3202
323ff71e 3203 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3204 if (!status)
3205 be_link_status_update(adapter, link_status);
3206
fba87559 3207 netif_tx_start_all_queues(netdev);
045508a8 3208 be_roce_dev_open(adapter);
c9c47142 3209
c5abe7c0 3210#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3211 if (skyhawk_chip(adapter))
3212 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3213#endif
3214
889cd4b2
SP
3215 return 0;
3216err:
3217 be_close(adapter->netdev);
3218 return -EIO;
5fb379ee
SP
3219}
3220
71d8d1b5
AK
3221static int be_setup_wol(struct be_adapter *adapter, bool enable)
3222{
3223 struct be_dma_mem cmd;
3224 int status = 0;
3225 u8 mac[ETH_ALEN];
3226
c7bf7169 3227 eth_zero_addr(mac);
71d8d1b5
AK
3228
3229 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3230 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3231 GFP_KERNEL);
ddf1169f 3232 if (!cmd.va)
6b568689 3233 return -ENOMEM;
71d8d1b5
AK
3234
3235 if (enable) {
3236 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3237 PCICFG_PM_CONTROL_OFFSET,
3238 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3239 if (status) {
3240 dev_err(&adapter->pdev->dev,
2381a55c 3241 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
3242 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3243 cmd.dma);
71d8d1b5
AK
3244 return status;
3245 }
3246 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3247 adapter->netdev->dev_addr,
3248 &cmd);
71d8d1b5
AK
3249 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3250 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3251 } else {
3252 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3253 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3254 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3255 }
3256
2b7bcebf 3257 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3258 return status;
3259}
3260
f7062ee5
SP
3261static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3262{
3263 u32 addr;
3264
3265 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3266
3267 mac[5] = (u8)(addr & 0xFF);
3268 mac[4] = (u8)((addr >> 8) & 0xFF);
3269 mac[3] = (u8)((addr >> 16) & 0xFF);
3270 /* Use the OUI from the current MAC address */
3271 memcpy(mac, adapter->netdev->dev_addr, 3);
3272}
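/* Illustration: for a PF MAC of 00:90:fa:aa:bb:cc (a made-up example)
 * the seed keeps the 00:90:fa OUI and takes its low three bytes from
 * the jhash of the full PF address; be_vf_eth_addr_config() then hands
 * VF n that seed with mac[5] advanced n times.
 */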
3273
6d87f5c3
AK
3274/*
3275 * Generate a seed MAC address from the PF MAC Address using jhash.
 3276 * MAC addresses for VFs are assigned incrementally starting from the seed.
3277 * These addresses are programmed in the ASIC by the PF and the VF driver
3278 * queries for the MAC address during its probe.
3279 */
4c876616 3280static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3281{
f9449ab7 3282 u32 vf;
3abcdeda 3283 int status = 0;
6d87f5c3 3284 u8 mac[ETH_ALEN];
11ac75ed 3285 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3286
3287 be_vf_eth_addr_generate(adapter, mac);
3288
11ac75ed 3289 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3290 if (BEx_chip(adapter))
590c391d 3291 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3292 vf_cfg->if_handle,
3293 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3294 else
3295 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3296 vf + 1);
590c391d 3297
6d87f5c3
AK
3298 if (status)
3299 dev_err(&adapter->pdev->dev,
748b539a
SP
3300 "Mac address assignment failed for VF %d\n",
3301 vf);
6d87f5c3 3302 else
11ac75ed 3303 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3304
3305 mac[5] += 1;
3306 }
3307 return status;
3308}
3309
4c876616
SP
3310static int be_vfs_mac_query(struct be_adapter *adapter)
3311{
3312 int status, vf;
3313 u8 mac[ETH_ALEN];
3314 struct be_vf_cfg *vf_cfg;
4c876616
SP
3315
3316 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3317 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3318 mac, vf_cfg->if_handle,
3319 false, vf+1);
4c876616
SP
3320 if (status)
3321 return status;
3322 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3323 }
3324 return 0;
3325}
3326
f9449ab7 3327static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3328{
11ac75ed 3329 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3330 u32 vf;
3331
257a3feb 3332 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3333 dev_warn(&adapter->pdev->dev,
3334 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3335 goto done;
3336 }
3337
b4c1df93
SP
3338 pci_disable_sriov(adapter->pdev);
3339
11ac75ed 3340 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3341 if (BEx_chip(adapter))
11ac75ed
SP
3342 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3343 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3344 else
3345 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3346 vf + 1);
f9449ab7 3347
11ac75ed
SP
3348 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3349 }
39f1d94d
SP
3350done:
3351 kfree(adapter->vf_cfg);
3352 adapter->num_vfs = 0;
f174c7ec 3353 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3354}
3355
7707133c
SP
3356static void be_clear_queues(struct be_adapter *adapter)
3357{
3358 be_mcc_queues_destroy(adapter);
3359 be_rx_cqs_destroy(adapter);
3360 be_tx_queues_destroy(adapter);
3361 be_evt_queues_destroy(adapter);
3362}
3363
68d7bdcb 3364static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3365{
191eb756
SP
3366 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3367 cancel_delayed_work_sync(&adapter->work);
3368 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3369 }
68d7bdcb
SP
3370}
3371
eb7dd46c
SP
3372static void be_cancel_err_detection(struct be_adapter *adapter)
3373{
3374 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3375 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3376 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3377 }
3378}
3379
b05004ad 3380static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3381{
b05004ad 3382 if (adapter->pmac_id) {
f66b7cfd
SP
3383 be_cmd_pmac_del(adapter, adapter->if_handle,
3384 adapter->pmac_id[0], 0);
b05004ad
SK
3385 kfree(adapter->pmac_id);
3386 adapter->pmac_id = NULL;
3387 }
3388}
3389
c5abe7c0 3390#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3391static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3392{
630f4b70
SB
3393 struct net_device *netdev = adapter->netdev;
3394
c9c47142
SP
3395 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3396 be_cmd_manage_iface(adapter, adapter->if_handle,
3397 OP_CONVERT_TUNNEL_TO_NORMAL);
3398
3399 if (adapter->vxlan_port)
3400 be_cmd_set_vxlan_port(adapter, 0);
3401
3402 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3403 adapter->vxlan_port = 0;
630f4b70
SB
3404
3405 netdev->hw_enc_features = 0;
3406 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3407 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3408}
c5abe7c0 3409#endif
c9c47142 3410
f2858738
VV
3411static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3412{
3413 struct be_resources res = adapter->pool_res;
3414 u16 num_vf_qs = 1;
3415
3416 /* Distribute the queue resources equally among the PF and its VFs.
3417 * Do not distribute queue resources in multi-channel configuration.
3418 */
3419 if (num_vfs && !be_is_mc(adapter)) {
3420 /* If the number of VFs requested leaves more than 8 queue pairs
3421 * to spare (num_vfs < max_vfs - 8), assign 8 queue pairs to the PF
3422 * and divide the remaining resources evenly among the VFs.
3423 */
3424 if (num_vfs < (be_max_vfs(adapter) - 8))
3425 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3426 else
3427 num_vf_qs = res.max_rss_qs / num_vfs;
3428
3429 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS-capable
3430 * interfaces per port. Provide RSS on VFs only if the number
3431 * of VFs requested is less than the MAX_RSS_IFACES limit.
3432 */
3433 if (num_vfs >= MAX_RSS_IFACES)
3434 num_vf_qs = 1;
3435 }
3436 return num_vf_qs;
3437}
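
/* Editorial sketch with hypothetical numbers: take max_rss_qs = 64 and
 * be_max_vfs() = 32 on a non-multi-channel PF. For num_vfs = 4
 * (< 32 - 8), the PF keeps 8 queues and each VF gets (64 - 8) / 4 = 14;
 * for num_vfs = 30 (>= 32 - 8) the whole pool is split, 64 / 30 = 2 per
 * VF. Either result collapses to a single queue pair (no RSS) once
 * num_vfs reaches the MAX_RSS_IFACES limit.
 */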
3438
b05004ad
SK
3439static int be_clear(struct be_adapter *adapter)
3440{
f2858738
VV
3441 struct pci_dev *pdev = adapter->pdev;
3442 u16 num_vf_qs;
3443
68d7bdcb 3444 be_cancel_worker(adapter);
191eb756 3445
11ac75ed 3446 if (sriov_enabled(adapter))
f9449ab7
SP
3447 be_vf_clear(adapter);
3448
bec84e6b
VV
3449 /* Re-configure FW to distribute resources evenly across the max
3450 * supported number of VFs, but only when VFs are not already enabled.
3451 */
f2858738
VV
3452 if (be_physfn(adapter) && !pci_vfs_assigned(pdev)) {
3453 num_vf_qs = be_calculate_vf_qs(adapter,
3454 pci_sriov_get_totalvfs(pdev));
bec84e6b 3455 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3456 pci_sriov_get_totalvfs(pdev),
3457 num_vf_qs);
3458 }
bec84e6b 3459
c5abe7c0 3460#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3461 be_disable_vxlan_offloads(adapter);
c5abe7c0 3462#endif
2d17f403 3463 /* delete the primary mac along with the uc-mac list */
b05004ad 3464 be_mac_clear(adapter);
fbc13f01 3465
f9449ab7 3466 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3467
7707133c 3468 be_clear_queues(adapter);
a54769f5 3469
10ef9ab4 3470 be_msix_disable(adapter);
e1ad8e33 3471 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3472 return 0;
3473}
3474
0700d816
KA
3475static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3476 u32 cap_flags, u32 vf)
3477{
3478 u32 en_flags;
3479 int status;
3480
3481 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3482 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3483 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
0700d816
KA
3484
3485 en_flags &= cap_flags;
3486
3487 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3488 if_handle, vf);
3489
3490 return status;
3491}
3492
4c876616 3493static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3494{
92bf14ab 3495 struct be_resources res = {0};
4c876616 3496 struct be_vf_cfg *vf_cfg;
0700d816
KA
3497 u32 cap_flags, vf;
3498 int status;
abb93951 3499
0700d816 3500 /* If a FW profile exists, then cap_flags are updated */
4c876616
SP
3501 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3502 BE_IF_FLAGS_MULTICAST;
abb93951 3503
4c876616 3504 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3505 if (!BE3_chip(adapter)) {
3506 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3507 RESOURCE_LIMITS,
92bf14ab
SP
3508 vf + 1);
3509 if (!status)
3510 cap_flags = res.if_cap_flags;
3511 }
4c876616 3512
0700d816
KA
3513 status = be_if_create(adapter, &vf_cfg->if_handle,
3514 cap_flags, vf + 1);
4c876616 3515 if (status)
0700d816 3516 return status;
4c876616 3517 }
0700d816
KA
3518
3519 return 0;
abb93951
PR
3520}
3521
39f1d94d 3522static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3523{
11ac75ed 3524 struct be_vf_cfg *vf_cfg;
30128031
SP
3525 int vf;
3526
39f1d94d
SP
3527 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3528 GFP_KERNEL);
3529 if (!adapter->vf_cfg)
3530 return -ENOMEM;
3531
11ac75ed
SP
3532 for_all_vfs(adapter, vf_cfg, vf) {
3533 vf_cfg->if_handle = -1;
3534 vf_cfg->pmac_id = -1;
30128031 3535 }
39f1d94d 3536 return 0;
30128031
SP
3537}
3538
f9449ab7
SP
3539static int be_vf_setup(struct be_adapter *adapter)
3540{
c502224e 3541 struct device *dev = &adapter->pdev->dev;
11ac75ed 3542 struct be_vf_cfg *vf_cfg;
4c876616 3543 int status, old_vfs, vf;
04a06028 3544 u32 privileges;
39f1d94d 3545
257a3feb 3546 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3547
3548 status = be_vf_setup_init(adapter);
3549 if (status)
3550 goto err;
30128031 3551
4c876616
SP
3552 if (old_vfs) {
3553 for_all_vfs(adapter, vf_cfg, vf) {
3554 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3555 if (status)
3556 goto err;
3557 }
f9449ab7 3558
4c876616
SP
3559 status = be_vfs_mac_query(adapter);
3560 if (status)
3561 goto err;
3562 } else {
bec84e6b
VV
3563 status = be_vfs_if_create(adapter);
3564 if (status)
3565 goto err;
3566
39f1d94d
SP
3567 status = be_vf_eth_addr_config(adapter);
3568 if (status)
3569 goto err;
3570 }
f9449ab7 3571
11ac75ed 3572 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3573 /* Allow VFs to program MAC/VLAN filters */
3574 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3575 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3576 status = be_cmd_set_fn_privileges(adapter,
3577 privileges |
3578 BE_PRIV_FILTMGMT,
3579 vf + 1);
3580 if (!status)
3581 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3582 vf);
3583 }
3584
0f77ba73
RN
3585 /* Allow full available bandwidth */
3586 if (!old_vfs)
3587 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3588
bdce2ad7 3589 if (!old_vfs) {
0599863d 3590 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3591 be_cmd_set_logical_link_config(adapter,
3592 IFLA_VF_LINK_STATE_AUTO,
3593 vf+1);
3594 }
f9449ab7 3595 }
b4c1df93
SP
3596
3597 if (!old_vfs) {
3598 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3599 if (status) {
3600 dev_err(dev, "SRIOV enable failed\n");
3601 adapter->num_vfs = 0;
3602 goto err;
3603 }
3604 }
f174c7ec
VV
3605
3606 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3607 return 0;
3608err:
4c876616
SP
3609 dev_err(dev, "VF setup failed\n");
3610 be_vf_clear(adapter);
f9449ab7
SP
3611 return status;
3612}
3613
f93f160b
VV
3614/* Converting function_mode bits on BE3 to SH mc_type enums */
3615
3616static u8 be_convert_mc_type(u32 function_mode)
3617{
66064dbc 3618 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3619 return vNIC1;
66064dbc 3620 else if (function_mode & QNQ_MODE)
f93f160b
VV
3621 return FLEX10;
3622 else if (function_mode & VNIC_MODE)
3623 return vNIC2;
3624 else if (function_mode & UMC_ENABLED)
3625 return UMC;
3626 else
3627 return MC_NONE;
3628}
3629
92bf14ab
SP
3630/* On BE2/BE3 FW does not suggest the supported limits */
3631static void BEx_get_resources(struct be_adapter *adapter,
3632 struct be_resources *res)
3633{
bec84e6b 3634 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3635
3636 if (be_physfn(adapter))
3637 res->max_uc_mac = BE_UC_PMAC_COUNT;
3638 else
3639 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3640
f93f160b
VV
3641 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3642
3643 if (be_is_mc(adapter)) {
3644 /* Assuming that there are 4 channels per port,
3645 * when multi-channel is enabled
3646 */
3647 if (be_is_qnq_mode(adapter))
3648 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3649 else
3650 /* In a non-qnq multichannel mode, the pvid
3651 * takes up one vlan entry
3652 */
3653 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3654 } else {
92bf14ab 3655 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3656 }
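
	/* Editorial sketch: assuming BE_NUM_VLANS_SUPPORTED is 64 (a
	 * hypothetical but representative value), a QnQ multi-channel
	 * function gets 64 / 8 = 8 VLAN filters, a non-QnQ multi-channel
	 * function gets 64 / 4 - 1 = 15 (the pvid eats one entry), and a
	 * single-channel function keeps all 64.
	 */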
3657
92bf14ab
SP
3658 res->max_mcast_mac = BE_MAX_MC;
3659
a5243dab
VV
3660 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3661 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3662 * *only* if it is RSS-capable.
3663 */
3664 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3665 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3666 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3667 res->max_tx_qs = 1;
a28277dc
SR
3668 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3669 struct be_resources super_nic_res = {0};
3670
3671 /* On a SuperNIC profile, the driver needs to use the
3672 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3673 */
f2858738
VV
3674 be_cmd_get_profile_config(adapter, &super_nic_res,
3675 RESOURCE_LIMITS, 0);
a28277dc
SR
3676 /* Some old versions of BE3 FW don't report max_tx_qs value */
3677 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3678 } else {
92bf14ab 3679 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3680 }
92bf14ab
SP
3681
3682 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3683 !use_sriov && be_physfn(adapter))
3684 res->max_rss_qs = (adapter->be3_native) ?
3685 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3686 res->max_rx_qs = res->max_rss_qs + 1;
3687
e3dc867c 3688 if (be_physfn(adapter))
d3518e21 3689 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3690 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3691 else
3692 res->max_evt_qs = 1;
92bf14ab
SP
3693
3694 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 3695 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
3696 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3697 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3698}
3699
30128031
SP
3700static void be_setup_init(struct be_adapter *adapter)
3701{
3702 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3703 adapter->phy.link_speed = -1;
30128031
SP
3704 adapter->if_handle = -1;
3705 adapter->be3_native = false;
f66b7cfd 3706 adapter->if_flags = 0;
f25b119c
PR
3707 if (be_physfn(adapter))
3708 adapter->cmd_privileges = MAX_PRIVILEGES;
3709 else
3710 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3711}
3712
bec84e6b
VV
3713static int be_get_sriov_config(struct be_adapter *adapter)
3714{
3715 struct device *dev = &adapter->pdev->dev;
3716 struct be_resources res = {0};
d3d18312 3717 int max_vfs, old_vfs;
bec84e6b
VV
3718
3719 /* Some old versions of BE3 FW don't report max_vfs value */
f2858738 3720 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 3721
bec84e6b
VV
3722 if (BE3_chip(adapter) && !res.max_vfs) {
3723 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3724 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3725 }
3726
d3d18312 3727 adapter->pool_res = res;
bec84e6b
VV
3728
3729 if (!be_max_vfs(adapter)) {
3730 if (num_vfs)
50762667 3731 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
bec84e6b
VV
3732 adapter->num_vfs = 0;
3733 return 0;
3734 }
3735
d3d18312
SP
3736 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3737
bec84e6b
VV
3738 /* validate num_vfs module param */
3739 old_vfs = pci_num_vf(adapter->pdev);
3740 if (old_vfs) {
3741 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3742 if (old_vfs != num_vfs)
3743 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3744 adapter->num_vfs = old_vfs;
3745 } else {
3746 if (num_vfs > be_max_vfs(adapter)) {
3747 dev_info(dev, "Resources unavailable to init %d VFs\n",
3748 num_vfs);
3749 dev_info(dev, "Limiting to %d VFs\n",
3750 be_max_vfs(adapter));
3751 }
3752 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3753 }
3754
3755 return 0;
3756}
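
/* Editorial note: the num_vfs validated above is the driver's module
 * parameter; a hypothetical invocation requesting four VFs would be:
 *
 *	modprobe be2net num_vfs=4
 *
 * Requests above be_max_vfs() are clamped (with the warnings above),
 * and an existing FW-enabled VF count always wins over the parameter.
 */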
3757
92bf14ab 3758static int be_get_resources(struct be_adapter *adapter)
abb93951 3759{
92bf14ab
SP
3760 struct device *dev = &adapter->pdev->dev;
3761 struct be_resources res = {0};
3762 int status;
abb93951 3763
92bf14ab
SP
3764 if (BEx_chip(adapter)) {
3765 BEx_get_resources(adapter, &res);
3766 adapter->res = res;
abb93951
PR
3767 }
3768
92bf14ab
SP
3769 /* For Lancer, SH etc. read per-function resource limits from FW.
3770 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3771 * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
3772 */
3773 if (!BEx_chip(adapter)) {
3774 status = be_cmd_get_func_config(adapter, &res);
3775 if (status)
3776 return status;
abb93951 3777
71bb8bd0
VV
3778 /* If a default RXQ must be created, we'll use up one RSSQ */
3779 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3780 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3781 res.max_rss_qs -= 1;
3782
92bf14ab
SP
3783 /* If RoCE may be enabled stash away half the EQs for RoCE */
3784 if (be_roce_supported(adapter))
3785 res.max_evt_qs /= 2;
3786 adapter->res = res;
abb93951 3787 }
4c876616 3788
71bb8bd0
VV
3789 /* If FW supports RSS default queue, then skip creating non-RSS
3790 * queue for non-IP traffic.
3791 */
3792 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3793 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3794
acbafeb1
SP
3795 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3796 be_max_txqs(adapter), be_max_rxqs(adapter),
3797 be_max_rss(adapter), be_max_eqs(adapter),
3798 be_max_vfs(adapter));
3799 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3800 be_max_uc(adapter), be_max_mc(adapter),
3801 be_max_vlans(adapter));
3802
92bf14ab 3803 return 0;
abb93951
PR
3804}
3805
d3d18312
SP
3806static void be_sriov_config(struct be_adapter *adapter)
3807{
3808 struct device *dev = &adapter->pdev->dev;
f2858738 3809 u16 num_vf_qs;
d3d18312
SP
3810 int status;
3811
3812 status = be_get_sriov_config(adapter);
3813 if (status) {
3814 dev_err(dev, "Failed to query SR-IOV configuration\n");
3815 dev_err(dev, "SR-IOV cannot be enabled\n");
3816 return;
3817 }
3818
3819 /* When the HW is in an SRIOV capable configuration, the PF-pool
3820 * resources are equally distributed across the max number of
3821 * VFs. The user may request only a subset of the max VFs to be
3822 * enabled. Based on num_vfs, redistribute the resources across
3823 * num_vfs so that each VF has access to more resources.
3824 * This facility is not available in BE3 FW.
3825 * Also, this is done by FW in the Lancer chip.
3826 */
3827 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
f2858738 3828 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
d3d18312
SP
3829 status = be_cmd_set_sriov_config(adapter,
3830 adapter->pool_res,
f2858738 3831 adapter->num_vfs, num_vf_qs);
d3d18312
SP
3832 if (status)
3833 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3834 }
3835}
3836
39f1d94d
SP
3837static int be_get_config(struct be_adapter *adapter)
3838{
6b085ba9 3839 int status, level;
542963b7 3840 u16 profile_id;
6b085ba9
SP
3841
3842 status = be_cmd_get_cntl_attributes(adapter);
3843 if (status)
3844 return status;
39f1d94d 3845
e97e3cda 3846 status = be_cmd_query_fw_cfg(adapter);
abb93951 3847 if (status)
92bf14ab 3848 return status;
abb93951 3849
6b085ba9
SP
3850 if (BEx_chip(adapter)) {
3851 level = be_cmd_get_fw_log_level(adapter);
3852 adapter->msg_enable =
3853 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3854 }
3855
3856 be_cmd_get_acpi_wol_cap(adapter);
3857
21252377
VV
3858 be_cmd_query_port_name(adapter);
3859
3860 if (be_physfn(adapter)) {
542963b7
VV
3861 status = be_cmd_get_active_profile(adapter, &profile_id);
3862 if (!status)
3863 dev_info(&adapter->pdev->dev,
3864 "Using profile 0x%x\n", profile_id);
962bcb75 3865 }
bec84e6b 3866
d3d18312
SP
3867 if (!BE2_chip(adapter) && be_physfn(adapter))
3868 be_sriov_config(adapter);
542963b7 3869
92bf14ab
SP
3870 status = be_get_resources(adapter);
3871 if (status)
3872 return status;
abb93951 3873
46ee9c14
RN
3874 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3875 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3876 if (!adapter->pmac_id)
3877 return -ENOMEM;
abb93951 3878
92bf14ab
SP
3879 /* Sanitize cfg_num_qs based on HW and platform limits */
3880 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3881
3882 return 0;
39f1d94d
SP
3883}
3884
95046b92
SP
3885static int be_mac_setup(struct be_adapter *adapter)
3886{
3887 u8 mac[ETH_ALEN];
3888 int status;
3889
3890 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3891 status = be_cmd_get_perm_mac(adapter, mac);
3892 if (status)
3893 return status;
3894
3895 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3896 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3897 } else {
3898 /* Maybe the HW was reset; dev_addr must be re-programmed */
3899 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3900 }
3901
2c7a9dc1
AK
3902 /* For BE3-R VFs, the PF programs the initial MAC address */
3903 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3904 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3905 &adapter->pmac_id[0], 0);
95046b92
SP
3906 return 0;
3907}
3908
68d7bdcb
SP
3909static void be_schedule_worker(struct be_adapter *adapter)
3910{
3911 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3912 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3913}
3914
eb7dd46c
SP
3915static void be_schedule_err_detection(struct be_adapter *adapter)
3916{
3917 schedule_delayed_work(&adapter->be_err_detection_work,
3918 msecs_to_jiffies(1000));
3919 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
3920}
3921
7707133c 3922static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3923{
68d7bdcb 3924 struct net_device *netdev = adapter->netdev;
10ef9ab4 3925 int status;
ba343c77 3926
7707133c 3927 status = be_evt_queues_create(adapter);
abb93951
PR
3928 if (status)
3929 goto err;
73d540f2 3930
7707133c 3931 status = be_tx_qs_create(adapter);
c2bba3df
SK
3932 if (status)
3933 goto err;
10ef9ab4 3934
7707133c 3935 status = be_rx_cqs_create(adapter);
10ef9ab4 3936 if (status)
a54769f5 3937 goto err;
6b7c5b94 3938
7707133c 3939 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3940 if (status)
3941 goto err;
3942
68d7bdcb
SP
3943 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3944 if (status)
3945 goto err;
3946
3947 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3948 if (status)
3949 goto err;
3950
7707133c
SP
3951 return 0;
3952err:
3953 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3954 return status;
3955}
3956
68d7bdcb
SP
3957int be_update_queues(struct be_adapter *adapter)
3958{
3959 struct net_device *netdev = adapter->netdev;
3960 int status;
3961
3962 if (netif_running(netdev))
3963 be_close(netdev);
3964
3965 be_cancel_worker(adapter);
3966
3967 /* If any vectors have been shared with RoCE we cannot re-program
3968 * the MSIx table.
3969 */
3970 if (!adapter->num_msix_roce_vec)
3971 be_msix_disable(adapter);
3972
3973 be_clear_queues(adapter);
3974
3975 if (!msix_enabled(adapter)) {
3976 status = be_msix_enable(adapter);
3977 if (status)
3978 return status;
3979 }
3980
3981 status = be_setup_queues(adapter);
3982 if (status)
3983 return status;
3984
3985 be_schedule_worker(adapter);
3986
3987 if (netif_running(netdev))
3988 status = be_open(netdev);
3989
3990 return status;
3991}
3992
f7062ee5
SP
3993static inline int fw_major_num(const char *fw_ver)
3994{
3995 int fw_major = 0, i;
3996
3997 i = sscanf(fw_ver, "%d.", &fw_major);
3998 if (i != 1)
3999 return 0;
4000
4001 return fw_major;
4002}
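
/* Editorial sketch: fw_major_num() parses only the leading integer of
 * the version string, so with hypothetical inputs:
 *
 *	fw_major_num("4.6.62.0")  -> 4
 *	fw_major_num("10.2.88.0") -> 10
 *	fw_major_num("bad-ver")   -> 0   (sscanf() matches nothing)
 */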
4003
f962f840
SP
4004/* If any VFs are already enabled, don't FLR the PF */
4005static bool be_reset_required(struct be_adapter *adapter)
4006{
4007 return pci_num_vf(adapter->pdev) ? false : true;
4008}
4009
4010/* Wait for the FW to be ready and perform the required initialization */
4011static int be_func_init(struct be_adapter *adapter)
4012{
4013 int status;
4014
4015 status = be_fw_wait_ready(adapter);
4016 if (status)
4017 return status;
4018
4019 if (be_reset_required(adapter)) {
4020 status = be_cmd_reset_function(adapter);
4021 if (status)
4022 return status;
4023
4024 /* Wait for interrupts to quiesce after an FLR */
4025 msleep(100);
4026
4027 /* We can clear all errors when function reset succeeds */
4028 be_clear_all_error(adapter);
4029 }
4030
4031 /* Tell FW we're ready to fire cmds */
4032 status = be_cmd_fw_init(adapter);
4033 if (status)
4034 return status;
4035
4036 /* Allow interrupts for other ULPs running on NIC function */
4037 be_intr_set(adapter, true);
4038
4039 return 0;
4040}
4041
7707133c
SP
4042static int be_setup(struct be_adapter *adapter)
4043{
4044 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4045 int status;
4046
f962f840
SP
4047 status = be_func_init(adapter);
4048 if (status)
4049 return status;
4050
7707133c
SP
4051 be_setup_init(adapter);
4052
4053 if (!lancer_chip(adapter))
4054 be_cmd_req_native_mode(adapter);
4055
4056 status = be_get_config(adapter);
10ef9ab4 4057 if (status)
a54769f5 4058 goto err;
6b7c5b94 4059
7707133c 4060 status = be_msix_enable(adapter);
10ef9ab4 4061 if (status)
a54769f5 4062 goto err;
6b7c5b94 4063
0700d816
KA
4064 status = be_if_create(adapter, &adapter->if_handle,
4065 be_if_cap_flags(adapter), 0);
7707133c 4066 if (status)
a54769f5 4067 goto err;
6b7c5b94 4068
68d7bdcb
SP
4069 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4070 rtnl_lock();
7707133c 4071 status = be_setup_queues(adapter);
68d7bdcb 4072 rtnl_unlock();
95046b92 4073 if (status)
1578e777
PR
4074 goto err;
4075
7707133c 4076 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4077
4078 status = be_mac_setup(adapter);
10ef9ab4
SP
4079 if (status)
4080 goto err;
4081
e97e3cda 4082 be_cmd_get_fw_ver(adapter);
acbafeb1 4083 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4084
e9e2a904 4085 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4086 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4087 adapter->fw_ver);
4088 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4089 }
4090
1d1e9a46 4091 if (adapter->vlans_added)
10329df8 4092 be_vid_config(adapter);
7ab8b0b4 4093
a54769f5 4094 be_set_rx_mode(adapter->netdev);
5fb379ee 4095
00d594c3
KA
4096 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4097 adapter->rx_fc);
4098 if (status)
4099 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4100 &adapter->rx_fc);
590c391d 4101
00d594c3
KA
4102 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4103 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4104
bdce2ad7
SR
4105 if (be_physfn(adapter))
4106 be_cmd_set_logical_link_config(adapter,
4107 IFLA_VF_LINK_STATE_AUTO, 0);
4108
bec84e6b
VV
4109 if (adapter->num_vfs)
4110 be_vf_setup(adapter);
f9449ab7 4111
f25b119c
PR
4112 status = be_cmd_get_phy_info(adapter);
4113 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4114 adapter->phy.fc_autoneg = 1;
4115
68d7bdcb 4116 be_schedule_worker(adapter);
e1ad8e33 4117 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4118 return 0;
a54769f5
SP
4119err:
4120 be_clear(adapter);
4121 return status;
4122}
6b7c5b94 4123
66268739
IV
4124#ifdef CONFIG_NET_POLL_CONTROLLER
4125static void be_netpoll(struct net_device *netdev)
4126{
4127 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4128 struct be_eq_obj *eqo;
66268739
IV
4129 int i;
4130
e49cc34f
SP
4131 for_all_evt_queues(adapter, eqo, i) {
4132 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4133 napi_schedule(&eqo->napi);
4134 }
66268739
IV
4135}
4136#endif
4137
96c9b2e4 4138static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4139
306f1348
SP
4140static bool phy_flashing_required(struct be_adapter *adapter)
4141{
e02cfd96 4142 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4143 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4144}
4145
c165541e
PR
4146static bool is_comp_in_ufi(struct be_adapter *adapter,
4147 struct flash_section_info *fsec, int type)
4148{
4149 int i = 0, img_type = 0;
4150 struct flash_section_info_g2 *fsec_g2 = NULL;
4151
ca34fe38 4152 if (BE2_chip(adapter))
c165541e
PR
4153 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4154
4155 for (i = 0; i < MAX_FLASH_COMP; i++) {
4156 if (fsec_g2)
4157 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4158 else
4159 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4160
4161 if (img_type == type)
4162 return true;
4163 }
4164 return false;
4165
4166}
4167
4188e7df 4168static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4169 int header_size,
4170 const struct firmware *fw)
c165541e
PR
4171{
4172 struct flash_section_info *fsec = NULL;
4173 const u8 *p = fw->data;
4174
4175 p += header_size;
4176 while (p < (fw->data + fw->size)) {
4177 fsec = (struct flash_section_info *)p;
4178 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4179 return fsec;
4180 p += 32;
4181 }
4182 return NULL;
4183}
4184
96c9b2e4
VV
4185static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4186 u32 img_offset, u32 img_size, int hdr_size,
4187 u16 img_optype, bool *crc_match)
4188{
4189 u32 crc_offset;
4190 int status;
4191 u8 crc[4];
4192
70a7b525
VV
4193 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4194 img_size - 4);
96c9b2e4
VV
4195 if (status)
4196 return status;
4197
4198 crc_offset = hdr_size + img_offset + img_size - 4;
4199
4200 /* Skip flashing if the CRC of the flashed region matches */
4201 if (!memcmp(crc, p + crc_offset, 4))
4202 *crc_match = true;
4203 else
4204 *crc_match = false;
4205
4206 return status;
4207}
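
/* Editorial sketch: every image stores its CRC in its trailing 4
 * bytes, so the file-side copy sits at hdr_size + img_offset +
 * img_size - 4 within the UFI, while be_cmd_get_flash_crc() fetches
 * the same trailing bytes from the region already in flash; a match
 * lets the caller skip re-flashing that section.
 */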
4208
773a2d7c 4209static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4210 struct be_dma_mem *flash_cmd, int optype, int img_size,
4211 u32 img_offset)
773a2d7c 4212{
70a7b525 4213 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4214 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4215 int status;
773a2d7c 4216
773a2d7c
PR
4217 while (total_bytes) {
4218 num_bytes = min_t(u32, 32*1024, total_bytes);
4219
4220 total_bytes -= num_bytes;
4221
4222 if (!total_bytes) {
4223 if (optype == OPTYPE_PHY_FW)
4224 flash_op = FLASHROM_OPER_PHY_FLASH;
4225 else
4226 flash_op = FLASHROM_OPER_FLASH;
4227 } else {
4228 if (optype == OPTYPE_PHY_FW)
4229 flash_op = FLASHROM_OPER_PHY_SAVE;
4230 else
4231 flash_op = FLASHROM_OPER_SAVE;
4232 }
4233
be716446 4234 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4235 img += num_bytes;
4236 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4237 flash_op, img_offset +
4238 bytes_sent, num_bytes);
4c60005f 4239 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4240 optype == OPTYPE_PHY_FW)
4241 break;
4242 else if (status)
773a2d7c 4243 return status;
70a7b525
VV
4244
4245 bytes_sent += num_bytes;
773a2d7c
PR
4246 }
4247 return 0;
4248}
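
/* Editorial sketch: the loop above pushes the image in 32KB chunks,
 * issuing SAVE for every chunk but the last and FLASH on the final
 * one. A hypothetical 72KB image therefore goes out as:
 *
 *	chunk 1: 32KB, FLASHROM_OPER_SAVE   (40KB left)
 *	chunk 2: 32KB, FLASHROM_OPER_SAVE   ( 8KB left)
 *	chunk 3:  8KB, FLASHROM_OPER_FLASH  (commit)
 */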
4249
0ad3157e 4250/* For BE2, BE3 and BE3-R */
ca34fe38 4251static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4252 const struct firmware *fw,
4253 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4254{
c165541e 4255 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4256 struct device *dev = &adapter->pdev->dev;
c165541e 4257 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4258 int status, i, filehdr_size, num_comp;
4259 const struct flash_comp *pflashcomp;
4260 bool crc_match;
4261 const u8 *p;
c165541e
PR
4262
4263 struct flash_comp gen3_flash_types[] = {
4264 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4265 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4266 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4267 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4268 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4269 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4270 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4271 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4272 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4273 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4274 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4275 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4276 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4277 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4278 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4279 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4280 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4281 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4282 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4283 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4284 };
c165541e
PR
4285
4286 struct flash_comp gen2_flash_types[] = {
4287 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4288 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4289 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4290 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4291 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4292 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4293 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4294 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4295 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4296 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4297 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4298 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4299 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4300 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4301 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4302 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4303 };
4304
ca34fe38 4305 if (BE3_chip(adapter)) {
3f0d4560
AK
4306 pflashcomp = gen3_flash_types;
4307 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4308 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4309 } else {
4310 pflashcomp = gen2_flash_types;
4311 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4312 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4313 img_hdrs_size = 0;
84517482 4314 }
ca34fe38 4315
c165541e
PR
4316 /* Get flash section info*/
4317 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4318 if (!fsec) {
96c9b2e4 4319 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4320 return -1;
4321 }
9fe96934 4322 for (i = 0; i < num_comp; i++) {
c165541e 4323 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4324 continue;
c165541e
PR
4325
4326 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4327 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4328 continue;
4329
773a2d7c
PR
4330 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4331 !phy_flashing_required(adapter))
306f1348 4332 continue;
c165541e 4333
773a2d7c 4334 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4335 status = be_check_flash_crc(adapter, fw->data,
4336 pflashcomp[i].offset,
4337 pflashcomp[i].size,
4338 filehdr_size +
4339 img_hdrs_size,
4340 OPTYPE_REDBOOT, &crc_match);
4341 if (status) {
4342 dev_err(dev,
4343 "Could not get CRC for 0x%x region\n",
4344 pflashcomp[i].optype);
4345 continue;
4346 }
4347
4348 if (crc_match)
773a2d7c
PR
4349 continue;
4350 }
c165541e 4351
96c9b2e4
VV
4352 p = fw->data + filehdr_size + pflashcomp[i].offset +
4353 img_hdrs_size;
306f1348
SP
4354 if (p + pflashcomp[i].size > fw->data + fw->size)
4355 return -1;
773a2d7c
PR
4356
4357 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4358 pflashcomp[i].size, 0);
773a2d7c 4359 if (status) {
96c9b2e4 4360 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4361 pflashcomp[i].img_type);
4362 return status;
84517482 4363 }
84517482 4364 }
84517482
AK
4365 return 0;
4366}
4367
96c9b2e4
VV
4368static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4369{
4370 u32 img_type = le32_to_cpu(fsec_entry.type);
4371 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4372
4373 if (img_optype != 0xFFFF)
4374 return img_optype;
4375
4376 switch (img_type) {
4377 case IMAGE_FIRMWARE_iSCSI:
4378 img_optype = OPTYPE_ISCSI_ACTIVE;
4379 break;
4380 case IMAGE_BOOT_CODE:
4381 img_optype = OPTYPE_REDBOOT;
4382 break;
4383 case IMAGE_OPTION_ROM_ISCSI:
4384 img_optype = OPTYPE_BIOS;
4385 break;
4386 case IMAGE_OPTION_ROM_PXE:
4387 img_optype = OPTYPE_PXE_BIOS;
4388 break;
4389 case IMAGE_OPTION_ROM_FCoE:
4390 img_optype = OPTYPE_FCOE_BIOS;
4391 break;
4392 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4393 img_optype = OPTYPE_ISCSI_BACKUP;
4394 break;
4395 case IMAGE_NCSI:
4396 img_optype = OPTYPE_NCSI_FW;
4397 break;
4398 case IMAGE_FLASHISM_JUMPVECTOR:
4399 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4400 break;
4401 case IMAGE_FIRMWARE_PHY:
4402 img_optype = OPTYPE_SH_PHY_FW;
4403 break;
4404 case IMAGE_REDBOOT_DIR:
4405 img_optype = OPTYPE_REDBOOT_DIR;
4406 break;
4407 case IMAGE_REDBOOT_CONFIG:
4408 img_optype = OPTYPE_REDBOOT_CONFIG;
4409 break;
4410 case IMAGE_UFI_DIR:
4411 img_optype = OPTYPE_UFI_DIR;
4412 break;
4413 default:
4414 break;
4415 }
4416
4417 return img_optype;
4418}
4419
773a2d7c 4420static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4421 const struct firmware *fw,
4422 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4423{
773a2d7c 4424 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4425 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4426 struct device *dev = &adapter->pdev->dev;
773a2d7c 4427 struct flash_section_info *fsec = NULL;
96c9b2e4 4428 u32 img_offset, img_size, img_type;
70a7b525 4429 u16 img_optype, flash_optype;
96c9b2e4 4430 int status, i, filehdr_size;
96c9b2e4 4431 const u8 *p;
773a2d7c
PR
4432
4433 filehdr_size = sizeof(struct flash_file_hdr_g3);
4434 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4435 if (!fsec) {
96c9b2e4 4436 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4437 return -EINVAL;
773a2d7c
PR
4438 }
4439
70a7b525 4440retry_flash:
773a2d7c
PR
4441 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4442 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4443 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4444 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4445 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4446 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4447
96c9b2e4 4448 if (img_optype == 0xFFFF)
773a2d7c 4449 continue;
70a7b525
VV
4450
4451 if (flash_offset_support)
4452 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4453 else
4454 flash_optype = img_optype;
4455
96c9b2e4
VV
4456 /* Don't bother verifying CRC if an old FW image is being
4457 * flashed
4458 */
4459 if (old_fw_img)
4460 goto flash;
4461
4462 status = be_check_flash_crc(adapter, fw->data, img_offset,
4463 img_size, filehdr_size +
70a7b525 4464 img_hdrs_size, flash_optype,
96c9b2e4 4465 &crc_match);
4c60005f
KA
4466 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4467 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4468 /* The current FW image on the card does not support
4469 * OFFSET based flashing. Retry using older mechanism
4470 * of OPTYPE based flashing
4471 */
4472 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4473 flash_offset_support = false;
4474 goto retry_flash;
4475 }
4476
4477 /* The current FW image on the card does not recognize
4478 * the new FLASH op_type. The FW download is partially
4479 * complete. Reboot the server now to enable FW image
4480 * to recognize the new FLASH op_type. To complete the
4481 * remaining process, download the same FW again after
4482 * the reboot.
4483 */
96c9b2e4
VV
4484 dev_err(dev, "Flash incomplete. Reset the server\n");
4485 dev_err(dev, "Download FW image again after reset\n");
4486 return -EAGAIN;
4487 } else if (status) {
4488 dev_err(dev, "Could not get CRC for 0x%x region\n",
4489 img_optype);
4490 return -EFAULT;
773a2d7c
PR
4491 }
4492
96c9b2e4
VV
4493 if (crc_match)
4494 continue;
773a2d7c 4495
96c9b2e4
VV
4496flash:
4497 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4498 if (p + img_size > fw->data + fw->size)
4499 return -1;
4500
70a7b525
VV
4501 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4502 img_offset);
4503
4504 /* The current FW image on the card does not support OFFSET
4505 * based flashing. Retry using older mechanism of OPTYPE based
4506 * flashing
4507 */
4508 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4509 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4510 flash_offset_support = false;
4511 goto retry_flash;
4512 }
4513
96c9b2e4
VV
4514 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4515 * UFI_DIR region
4516 */
4c60005f
KA
4517 if (old_fw_img &&
4518 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4519 (img_optype == OPTYPE_UFI_DIR &&
4520 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4521 continue;
4522 } else if (status) {
4523 dev_err(dev, "Flashing section type 0x%x failed\n",
4524 img_type);
4525 return -EFAULT;
773a2d7c
PR
4526 }
4527 }
4528 return 0;
3f0d4560
AK
4529}
4530
485bf569 4531static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4532 const struct firmware *fw)
84517482 4533{
485bf569
SN
4534#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4535#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4536 struct device *dev = &adapter->pdev->dev;
84517482 4537 struct be_dma_mem flash_cmd;
485bf569
SN
4538 const u8 *data_ptr = NULL;
4539 u8 *dest_image_ptr = NULL;
4540 size_t image_size = 0;
4541 u32 chunk_size = 0;
4542 u32 data_written = 0;
4543 u32 offset = 0;
4544 int status = 0;
4545 u8 add_status = 0;
f67ef7ba 4546 u8 change_status;
84517482 4547
485bf569 4548 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4549 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4550 return -EINVAL;
d9efd2af
SB
4551 }
4552
485bf569
SN
4553 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4554 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4555 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4556 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4557 if (!flash_cmd.va)
4558 return -ENOMEM;
84517482 4559
485bf569
SN
4560 dest_image_ptr = flash_cmd.va +
4561 sizeof(struct lancer_cmd_req_write_object);
4562 image_size = fw->size;
4563 data_ptr = fw->data;
4564
4565 while (image_size) {
4566 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4567
4568 /* Copy the image chunk content. */
4569 memcpy(dest_image_ptr, data_ptr, chunk_size);
4570
4571 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4572 chunk_size, offset,
4573 LANCER_FW_DOWNLOAD_LOCATION,
4574 &data_written, &change_status,
4575 &add_status);
485bf569
SN
4576 if (status)
4577 break;
4578
4579 offset += data_written;
4580 data_ptr += data_written;
4581 image_size -= data_written;
4582 }
4583
4584 if (!status) {
4585 /* Commit the FW written */
4586 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4587 0, offset,
4588 LANCER_FW_DOWNLOAD_LOCATION,
4589 &data_written, &change_status,
4590 &add_status);
485bf569
SN
4591 }
4592
bb864e07 4593 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4594 if (status) {
bb864e07 4595 dev_err(dev, "Firmware load error\n");
3fb8cb80 4596 return be_cmd_status(status);
485bf569
SN
4597 }
4598
bb864e07
KA
4599 dev_info(dev, "Firmware flashed successfully\n");
4600
f67ef7ba 4601 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4602 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4603 status = lancer_physdev_ctrl(adapter,
4604 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4605 if (status) {
bb864e07
KA
4606 dev_err(dev, "Adapter busy, could not reset FW\n");
4607 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4608 }
4609 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4610 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4611 }
3fb8cb80
KA
4612
4613 return 0;
485bf569
SN
4614}
4615
5d3acd0d
VV
4616#define BE2_UFI 2
4617#define BE3_UFI 3
4618#define BE3R_UFI 10
4619#define SH_UFI 4
81a9e226 4620#define SH_P2_UFI 11
5d3acd0d 4621
ca34fe38 4622static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4623 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4624{
5d3acd0d
VV
4625 if (!fhdr) {
4626 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4627 return -1;
4628 }
773a2d7c 4629
5d3acd0d
VV
4630 /* First letter of the build version is used to identify
4631 * which chip this image file is meant for.
4632 */
4633 switch (fhdr->build[0]) {
4634 case BLD_STR_UFI_TYPE_SH:
81a9e226
VV
4635 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4636 SH_UFI;
5d3acd0d
VV
4637 case BLD_STR_UFI_TYPE_BE3:
4638 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4639 BE3_UFI;
4640 case BLD_STR_UFI_TYPE_BE2:
4641 return BE2_UFI;
4642 default:
4643 return -1;
4644 }
4645}
773a2d7c 4646
5d3acd0d
VV
4647/* Check if the flash image file is compatible with the adapter that
4648 * is being flashed.
4649 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4650 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
5d3acd0d
VV
4651 */
4652static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4653 struct flash_file_hdr_g3 *fhdr)
4654{
4655 int ufi_type = be_get_ufi_type(adapter, fhdr);
4656
4657 switch (ufi_type) {
81a9e226 4658 case SH_P2_UFI:
5d3acd0d 4659 return skyhawk_chip(adapter);
81a9e226
VV
4660 case SH_UFI:
4661 return (skyhawk_chip(adapter) &&
4662 adapter->asic_rev < ASIC_REV_P2);
5d3acd0d
VV
4663 case BE3R_UFI:
4664 return BE3_chip(adapter);
4665 case BE3_UFI:
4666 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4667 case BE2_UFI:
4668 return BE2_chip(adapter);
4669 default:
4670 return false;
4671 }
773a2d7c
PR
4672}
4673
485bf569
SN
4674static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4675{
5d3acd0d 4676 struct device *dev = &adapter->pdev->dev;
485bf569 4677 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4678 struct image_hdr *img_hdr_ptr;
4679 int status = 0, i, num_imgs;
485bf569 4680 struct be_dma_mem flash_cmd;
84517482 4681
5d3acd0d
VV
4682 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4683 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4684 dev_err(dev, "Flash image is not compatible with adapter\n");
4685 return -EINVAL;
84517482
AK
4686 }
4687
5d3acd0d
VV
4688 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4689 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4690 GFP_KERNEL);
4691 if (!flash_cmd.va)
4692 return -ENOMEM;
773a2d7c 4693
773a2d7c
PR
4694 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4695 for (i = 0; i < num_imgs; i++) {
4696 img_hdr_ptr = (struct image_hdr *)(fw->data +
4697 (sizeof(struct flash_file_hdr_g3) +
4698 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4699 if (!BE2_chip(adapter) &&
4700 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4701 continue;
84517482 4702
5d3acd0d
VV
4703 if (skyhawk_chip(adapter))
4704 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4705 num_imgs);
4706 else
4707 status = be_flash_BEx(adapter, fw, &flash_cmd,
4708 num_imgs);
84517482
AK
4709 }
4710
5d3acd0d
VV
4711 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4712 if (!status)
4713 dev_info(dev, "Firmware flashed successfully\n");
84517482 4714
485bf569
SN
4715 return status;
4716}
4717
4718int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4719{
4720 const struct firmware *fw;
4721 int status;
4722
4723 if (!netif_running(adapter->netdev)) {
4724 dev_err(&adapter->pdev->dev,
4725 "Firmware load not allowed (interface is down)\n");
940a3fcd 4726 return -ENETDOWN;
485bf569
SN
4727 }
4728
4729 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4730 if (status)
4731 goto fw_exit;
4732
4733 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4734
4735 if (lancer_chip(adapter))
4736 status = lancer_fw_download(adapter, fw);
4737 else
4738 status = be_fw_download(adapter, fw);
4739
eeb65ced 4740 if (!status)
e97e3cda 4741 be_cmd_get_fw_ver(adapter);
eeb65ced 4742
84517482
AK
4743fw_exit:
4744 release_firmware(fw);
4745 return status;
4746}
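
/* Editorial note: be_load_fw() is reached through ethtool's flash op;
 * a hypothetical invocation (interface and file names invented):
 *
 *	ethtool -f eth0 be_fw.ufi
 *
 * The interface must be up, or the -ENETDOWN branch above fires.
 */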
4747
add511b3
RP
4748static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4749 u16 flags)
a77dcb8c
AK
4750{
4751 struct be_adapter *adapter = netdev_priv(dev);
4752 struct nlattr *attr, *br_spec;
4753 int rem;
4754 int status = 0;
4755 u16 mode = 0;
4756
4757 if (!sriov_enabled(adapter))
4758 return -EOPNOTSUPP;
4759
4760 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4761 if (!br_spec)
4762 return -EINVAL;
a77dcb8c
AK
4763
4764 nla_for_each_nested(attr, br_spec, rem) {
4765 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4766 continue;
4767
b7c1a314
TG
4768 if (nla_len(attr) < sizeof(mode))
4769 return -EINVAL;
4770
a77dcb8c
AK
4771 mode = nla_get_u16(attr);
4772 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4773 return -EINVAL;
4774
4775 status = be_cmd_set_hsw_config(adapter, 0, 0,
4776 adapter->if_handle,
4777 mode == BRIDGE_MODE_VEPA ?
4778 PORT_FWD_TYPE_VEPA :
4779 PORT_FWD_TYPE_VEB);
4780 if (status)
4781 goto err;
4782
4783 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4784 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4785
4786 return status;
4787 }
4788err:
4789 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4790 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4791
4792 return status;
4793}
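
/* Editorial note: the setlink request handled above is typically
 * generated by iproute2 (hypothetical interface name):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 */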
4794
4795static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4796 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4797{
4798 struct be_adapter *adapter = netdev_priv(dev);
4799 int status = 0;
4800 u8 hsw_mode;
4801
4802 if (!sriov_enabled(adapter))
4803 return 0;
4804
4805 /* BE and Lancer chips support VEB mode only */
4806 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4807 hsw_mode = PORT_FWD_TYPE_VEB;
4808 } else {
4809 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4810 adapter->if_handle, &hsw_mode);
4811 if (status)
4812 return 0;
4813 }
4814
4815 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4816 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c
SF
4817 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4818 0, 0);
a77dcb8c
AK
4819}
4820
c5abe7c0 4821#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4822/* VxLAN offload Notes:
4823 *
4824 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4825 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4826 * is expected to work across all types of IP tunnels once exported. Skyhawk
4827 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4828 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4829 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4830 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4831 *
4832 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4833 * adds more than one port, disable offloads and don't re-enable them again
4834 * until after all the tunnels are removed.
4835 */
c9c47142
SP
4836static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4837 __be16 port)
4838{
4839 struct be_adapter *adapter = netdev_priv(netdev);
4840 struct device *dev = &adapter->pdev->dev;
4841 int status;
4842
4843 if (lancer_chip(adapter) || BEx_chip(adapter))
4844 return;
4845
4846 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4847 dev_info(dev,
4848 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4849 dev_info(dev, "Disabling VxLAN offloads\n");
4850 adapter->vxlan_port_count++;
4851 goto err;
c9c47142
SP
4852 }
4853
630f4b70
SB
4854 if (adapter->vxlan_port_count++ >= 1)
4855 return;
4856
c9c47142
SP
4857 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4858 OP_CONVERT_NORMAL_TO_TUNNEL);
4859 if (status) {
4860 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4861 goto err;
4862 }
4863
4864 status = be_cmd_set_vxlan_port(adapter, port);
4865 if (status) {
4866 dev_warn(dev, "Failed to add VxLAN port\n");
4867 goto err;
4868 }
4869 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4870 adapter->vxlan_port = port;
4871
630f4b70
SB
4872 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4873 NETIF_F_TSO | NETIF_F_TSO6 |
4874 NETIF_F_GSO_UDP_TUNNEL;
4875 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4876 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4877
c9c47142
SP
4878 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4879 be16_to_cpu(port));
4880 return;
4881err:
4882 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4883}
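
/* Editorial sketch of the port bookkeeping above, for two stacked
 * VxLAN devices on hypothetical UDP ports 4789 and 8472:
 *
 *	add 4789: count 1, offloads enabled for 4789
 *	add 8472: count 2, offloads disabled (single-port HW limit)
 *	del 8472: count 1, offloads stay off
 *	del 4789: count 0, offloads return only with a future add
 */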
4884
4885static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4886 __be16 port)
4887{
4888 struct be_adapter *adapter = netdev_priv(netdev);
4889
4890 if (lancer_chip(adapter) || BEx_chip(adapter))
4891 return;
4892
4893 if (adapter->vxlan_port != port)
630f4b70 4894 goto done;
c9c47142
SP
4895
4896 be_disable_vxlan_offloads(adapter);
4897
4898 dev_info(&adapter->pdev->dev,
4899 "Disabled VxLAN offloads for UDP port %d\n",
4900 be16_to_cpu(port));
630f4b70
SB
4901done:
4902 adapter->vxlan_port_count--;
c9c47142 4903}
725d548f 4904
5f35227e
JG
4905static netdev_features_t be_features_check(struct sk_buff *skb,
4906 struct net_device *dev,
4907 netdev_features_t features)
725d548f 4908{
16dde0d6
SB
4909 struct be_adapter *adapter = netdev_priv(dev);
4910 u8 l4_hdr = 0;
4911
4912 /* The code below restricts offload features for some tunneled packets.
4913 * Offload features for normal (non tunnel) packets are unchanged.
4914 */
4915 if (!skb->encapsulation ||
4916 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4917 return features;
4918
4919 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4920 * should disable tunnel offload features if it's not a VxLAN packet,
4921 * as tunnel offloads have been enabled only for VxLAN. This is done to
4922 * allow other tunneled traffic like GRE to work fine while VxLAN
4923 * offloads are configured in Skyhawk-R.
4924 */
4925 switch (vlan_get_protocol(skb)) {
4926 case htons(ETH_P_IP):
4927 l4_hdr = ip_hdr(skb)->protocol;
4928 break;
4929 case htons(ETH_P_IPV6):
4930 l4_hdr = ipv6_hdr(skb)->nexthdr;
4931 break;
4932 default:
4933 return features;
4934 }
4935
4936 if (l4_hdr != IPPROTO_UDP ||
4937 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4938 skb->inner_protocol != htons(ETH_P_TEB) ||
4939 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4940 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4941 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4942
4943 return features;
725d548f 4944}
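
/* Editorial sketch: per the checks above, a packet keeps its tunnel
 * offload features only when it looks like VxLAN: the outer L4
 * protocol is UDP, the inner protocol is a full Ethernet frame
 * (ETH_P_TEB), and exactly sizeof(struct udphdr) +
 * sizeof(struct vxlanhdr) = 8 + 8 = 16 bytes separate the outer
 * transport header from the inner MAC header. Anything else (GRE, for
 * instance) is sent with checksum and GSO features masked off.
 */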
c5abe7c0 4945#endif
c9c47142 4946
e5686ad8 4947static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4948 .ndo_open = be_open,
4949 .ndo_stop = be_close,
4950 .ndo_start_xmit = be_xmit,
a54769f5 4951 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4952 .ndo_set_mac_address = be_mac_addr_set,
4953 .ndo_change_mtu = be_change_mtu,
ab1594e9 4954 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4955 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4956 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4957 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4958 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4959 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4960 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4961 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4962 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4963#ifdef CONFIG_NET_POLL_CONTROLLER
4964 .ndo_poll_controller = be_netpoll,
4965#endif
a77dcb8c
AK
4966 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4967 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4968#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4969 .ndo_busy_poll = be_busy_poll,
6384a4d0 4970#endif
c5abe7c0 4971#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4972 .ndo_add_vxlan_port = be_add_vxlan_port,
4973 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4974 .ndo_features_check = be_features_check,
c5abe7c0 4975#endif
6b7c5b94
SP
4976};
4977
4978static void be_netdev_init(struct net_device *netdev)
4979{
4980 struct be_adapter *adapter = netdev_priv(netdev);
4981
6332c8d3 4982 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4983 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4984 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4985 if (be_multi_rxq(adapter))
4986 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4987
4988 netdev->features |= netdev->hw_features |
f646968f 4989 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4990
eb8a50d9 4991 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4992 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4993
fbc13f01
AK
4994 netdev->priv_flags |= IFF_UNICAST_FLT;
4995
6b7c5b94
SP
4996 netdev->flags |= IFF_MULTICAST;
4997
b7e5887e 4998 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4999
10ef9ab4 5000 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 5001
7ad24ea4 5002 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
5003}
5004
87ac1a52
KA
5005static void be_cleanup(struct be_adapter *adapter)
5006{
5007 struct net_device *netdev = adapter->netdev;
5008
5009 rtnl_lock();
5010 netif_device_detach(netdev);
5011 if (netif_running(netdev))
5012 be_close(netdev);
5013 rtnl_unlock();
5014
5015 be_clear(adapter);
5016}
5017
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

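/* Attempt a full re-init of the adapter after a HW error. A failure is
 * treated as fatal only on the PF; VFs keep re-trying (see the caller).
 */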
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

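/* Delayed-work handler that polls for HW errors. On an error the adapter
 * is quiesced and, on Lancer (the only chip with recovery support here),
 * recovery is attempted; the work re-arms itself unless recovery failed
 * on a PF.
 */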
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

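/* Periodic (1 second) housekeeping: firmware stats refresh, die-temperature
 * query on the PF, replenishing RX queues starved by allocation failures,
 * EQ-delay (interrupt moderation) updates and SFP event logging.
 */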
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions.
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

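/* The doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 on BE/Skyhawk PFs. */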
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

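/* Map the BARs needed by the driver: the CSR BAR (BEx PFs only), the
 * doorbell BAR and, on Skyhawk, the RoCE doorbell BAR.
 */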
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;
	adapter->cfg_num_qs = netif_get_num_default_rss_queues();

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

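/* PCI remove: undo, in reverse order, everything that be_probe() set up. */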
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

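/* PCI probe: enable the device, set the DMA mask, map BARs, allocate the
 * netdev and driver state, configure the function via FW commands
 * (be_setup) and finally register the netdev.
 */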
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

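/* Legacy PM suspend: arm wake-on-lan if enabled, quiesce the adapter and
 * put the device into the requested low-power state.
 */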
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

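/* EEH/AER error handler: quiesce the adapter on the first error and ask
 * the PCI core for a slot reset, unless the failure is permanent.
 */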
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

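/* Module entry point: validate the rx_frag_size module parameter and
 * register the PCI driver.
 */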
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);