be2net: Use GET_PROFILE_CONFIG cmd for BE3-R to query max-vfs
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
6b7c5b94
SP
26
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 30MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
31MODULE_LICENSE("GPL");
32
ba343c77 33static unsigned int num_vfs;
ba343c77 34module_param(num_vfs, uint, S_IRUGO);
ba343c77 35MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 36
11ac75ed
SP
37static ushort rx_frag_size = 2048;
38module_param(rx_frag_size, ushort, S_IRUGO);
39MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
6b7c5b94 41static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 42 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
50 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names for each unrecoverable-error bit.
 * Index in the array corresponds to the bit position in the register.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: names for the upper unrecoverable-error bits;
 * trailing entries are reserved/undocumented.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 123
752961a1 124
6b7c5b94
SP
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 128 if (mem->va) {
2b7bcebf
IV
129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
1cfafab9
SP
131 mem->va = NULL;
132 }
6b7c5b94
SP
133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
ede23fa8
JP
144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
6b7c5b94 146 if (!mem->va)
10ef9ab4 147 return -ENOMEM;
6b7c5b94
SP
148 return 0;
149}
150
68c45a2d 151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 152{
db3ea781 153 u32 reg, enabled;
5f0b849e 154
db3ea781
SP
155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
5f0b849e 159 if (!enabled && enable)
6b7c5b94 160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 161 else if (enabled && !enable)
6b7c5b94 162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 163 else
6b7c5b94 164 return;
5f0b849e 165
db3ea781
SP
166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
168}
169
68c45a2d
SK
170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
8788fdc2 186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
191
192 wmb();
8788fdc2 193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
194}
195
94d73aaa
VV
196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
6b7c5b94
SP
198{
199 u32 val = 0;
94d73aaa 200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
202
203 wmb();
94d73aaa 204 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
205}
206
8788fdc2 207static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
208 bool arm, bool clear_int, u16 num_popped)
209{
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 214
f67ef7ba 215 if (adapter->eeh_error)
cf588477
SP
216 return;
217
6b7c5b94
SP
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
225}
226
8788fdc2 227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 233
f67ef7ba 234 if (adapter->eeh_error)
cf588477
SP
235 return;
236
6b7c5b94
SP
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
241}
242
6b7c5b94
SP
243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 246 struct device *dev = &adapter->pdev->dev;
6b7c5b94 247 struct sockaddr *addr = p;
5a712c13
SP
248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 251
ca9e4988
AK
252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
ff32f8ab
VV
255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
5a712c13
SP
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
704e4c88 266 */
5a712c13
SP
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
704e4c88
PR
278 }
279
5a712c13
SP
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
704e4c88 282 */
b188f090
SR
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
a65027e4 285 if (status)
e3a7ae2c 286 goto err;
6b7c5b94 287
5a712c13
SP
288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
61d23e9f 291 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
292 status = -EPERM;
293 goto err;
294 }
295
e3a7ae2c 296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 297 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
298 return 0;
299err:
5a712c13 300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
301 return status;
302}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 341{
ac124ff9
SP
342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 345 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 348
ac124ff9 349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
89a88ab8
AK
370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
ac124ff9 377 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 378 else
ac124ff9 379 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
ca34fe38 389static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 390{
ac124ff9
SP
391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 394 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 397
ac124ff9 398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
ac124ff9 421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
61000861
AK
435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
461ae379
AK
479 if (be_roce_supported(adapter)) {
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
61000861
AK
487}
488
005d5696
SX
489static void populate_lancer_stats(struct be_adapter *adapter)
490{
89a88ab8 491
005d5696 492 struct be_drv_stats *drvs = &adapter->drv_stats;
ac124ff9
SP
493 struct lancer_pport_stats *pport_stats =
494 pport_stats_from_cmd(adapter);
495
496 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
497 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
498 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
499 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 500 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 501 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
502 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
503 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
504 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
505 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
506 drvs->rx_dropped_tcp_length =
507 pport_stats->rx_dropped_invalid_tcp_length;
508 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
509 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
510 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
511 drvs->rx_dropped_header_too_small =
512 pport_stats->rx_dropped_header_too_small;
513 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
514 drvs->rx_address_filtered =
515 pport_stats->rx_address_filtered +
516 pport_stats->rx_vlan_filtered;
ac124ff9 517 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 518 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
519 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
520 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 521 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
522 drvs->forwarded_packets = pport_stats->num_forwards_lo;
523 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 524 drvs->rx_drops_too_many_frags =
ac124ff9 525 pport_stats->rx_drops_too_many_frags_lo;
005d5696 526}
89a88ab8 527
09c1c68f
SP
528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
4188e7df 540static void populate_erx_stats(struct be_adapter *adapter,
a6c578ef
AK
541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
89a88ab8
AK
554void be_parse_stats(struct be_adapter *adapter)
555{
61000861 556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
557 struct be_rx_obj *rxo;
558 int i;
a6c578ef 559 u32 erx_stat;
ac124ff9 560
ca34fe38
SP
561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
005d5696 563 } else {
ca34fe38
SP
564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
61000861
AK
566 else if (BE3_chip(adapter))
567 /* for BE3 */
ca34fe38 568 populate_be_v1_stats(adapter);
61000861
AK
569 else
570 populate_be_v2_stats(adapter);
d51ebd33 571
61000861 572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 573 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 576 }
09c1c68f 577 }
89a88ab8
AK
578}
579
ab1594e9
SP
580static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
581 struct rtnl_link_stats64 *stats)
6b7c5b94 582{
ab1594e9 583 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 584 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 585 struct be_rx_obj *rxo;
3c8def97 586 struct be_tx_obj *txo;
ab1594e9
SP
587 u64 pkts, bytes;
588 unsigned int start;
3abcdeda 589 int i;
6b7c5b94 590
3abcdeda 591 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
592 const struct be_rx_stats *rx_stats = rx_stats(rxo);
593 do {
594 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
595 pkts = rx_stats(rxo)->rx_pkts;
596 bytes = rx_stats(rxo)->rx_bytes;
597 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
598 stats->rx_packets += pkts;
599 stats->rx_bytes += bytes;
600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
601 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
602 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
603 }
604
3c8def97 605 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
606 const struct be_tx_stats *tx_stats = tx_stats(txo);
607 do {
608 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
609 pkts = tx_stats(txo)->tx_pkts;
610 bytes = tx_stats(txo)->tx_bytes;
611 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
612 stats->tx_packets += pkts;
613 stats->tx_bytes += bytes;
3c8def97 614 }
6b7c5b94
SP
615
616 /* bad pkts received */
ab1594e9 617 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
618 drvs->rx_alignment_symbol_errors +
619 drvs->rx_in_range_errors +
620 drvs->rx_out_range_errors +
621 drvs->rx_frame_too_long +
622 drvs->rx_dropped_too_small +
623 drvs->rx_dropped_too_short +
624 drvs->rx_dropped_header_too_small +
625 drvs->rx_dropped_tcp_length +
ab1594e9 626 drvs->rx_dropped_runt;
68110868 627
6b7c5b94 628 /* detailed rx errors */
ab1594e9 629 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
630 drvs->rx_out_range_errors +
631 drvs->rx_frame_too_long;
68110868 632
ab1594e9 633 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
634
635 /* frame alignment errors */
ab1594e9 636 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 637
6b7c5b94
SP
638 /* receiver fifo overrun */
639 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 640 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
641 drvs->rx_input_fifo_overflow_drop +
642 drvs->rx_drops_no_pbuf;
ab1594e9 643 return stats;
6b7c5b94
SP
644}
645
b236916a 646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 647{
6b7c5b94
SP
648 struct net_device *netdev = adapter->netdev;
649
b236916a 650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 651 netif_carrier_off(netdev);
b236916a 652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 653 }
b236916a
AK
654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
6b7c5b94
SP
659}
660
3c8def97 661static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 663{
3c8def97
SP
664 struct be_tx_stats *stats = tx_stats(txo);
665
ab1594e9 666 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 671 if (stopped)
ac124ff9 672 stats->tx_stops++;
ab1594e9 673 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
6b7c5b94 679{
ebc8d2ab
DM
680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
6b7c5b94
SP
684 /* to account for hdr wrb */
685 cnt++;
fe6d2a38
SP
686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
6b7c5b94
SP
689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
fe6d2a38 692 }
6b7c5b94
SP
693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 702 wrb->rsvd0 = 0;
6b7c5b94
SP
703}
704
1ded132d
AK
705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
cc4ce020 721static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
bc0c3405 722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
6b7c5b94 723{
1ded132d 724 u16 vlan_tag;
cc4ce020 725
6b7c5b94
SP
726 memset(hdr, 0, sizeof(*hdr));
727
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
729
49e4b847 730 if (skb_is_gso(skb)) {
6b7c5b94
SP
731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
732 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
733 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94
SP
736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb))
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 }
742
4c5102f9 743 if (vlan_tx_tag_present(skb)) {
6b7c5b94 744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 746 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
747 }
748
bc0c3405
AK
749 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
753 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
754}
755
2b7bcebf 756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 764 if (wrb->frag_len) {
7101e111 765 if (unmap_single)
2b7bcebf
IV
766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
7101e111 768 else
2b7bcebf 769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
770 }
771}
6b7c5b94 772
3c8def97 773static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
bc0c3405
AK
774 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
775 bool skip_hw_vlan)
6b7c5b94 776{
7101e111
SP
777 dma_addr_t busaddr;
778 int i, copied = 0;
2b7bcebf 779 struct device *dev = &adapter->pdev->dev;
6b7c5b94 780 struct sk_buff *first_skb = skb;
6b7c5b94
SP
781 struct be_eth_wrb *wrb;
782 struct be_eth_hdr_wrb *hdr;
7101e111
SP
783 bool map_single = false;
784 u16 map_head;
6b7c5b94 785
6b7c5b94
SP
786 hdr = queue_head_node(txq);
787 queue_head_inc(txq);
7101e111 788 map_head = txq->head;
6b7c5b94 789
ebc8d2ab 790 if (skb->len > skb->data_len) {
e743d313 791 int len = skb_headlen(skb);
2b7bcebf
IV
792 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
793 if (dma_mapping_error(dev, busaddr))
7101e111
SP
794 goto dma_err;
795 map_single = true;
ebc8d2ab
DM
796 wrb = queue_head_node(txq);
797 wrb_fill(wrb, busaddr, len);
798 be_dws_cpu_to_le(wrb, sizeof(*wrb));
799 queue_head_inc(txq);
800 copied += len;
801 }
6b7c5b94 802
ebc8d2ab 803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9e903e08 804 const struct skb_frag_struct *frag =
ebc8d2ab 805 &skb_shinfo(skb)->frags[i];
b061b39e 806 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 807 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 808 if (dma_mapping_error(dev, busaddr))
7101e111 809 goto dma_err;
ebc8d2ab 810 wrb = queue_head_node(txq);
9e903e08 811 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
812 be_dws_cpu_to_le(wrb, sizeof(*wrb));
813 queue_head_inc(txq);
9e903e08 814 copied += skb_frag_size(frag);
6b7c5b94
SP
815 }
816
817 if (dummy_wrb) {
818 wrb = queue_head_node(txq);
819 wrb_fill(wrb, 0, 0);
820 be_dws_cpu_to_le(wrb, sizeof(*wrb));
821 queue_head_inc(txq);
822 }
823
bc0c3405 824 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
825 be_dws_cpu_to_le(hdr, sizeof(*hdr));
826
827 return copied;
7101e111
SP
828dma_err:
829 txq->head = map_head;
830 while (copied) {
831 wrb = queue_head_node(txq);
2b7bcebf 832 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
833 map_single = false;
834 copied -= wrb->frag_len;
835 queue_head_inc(txq);
836 }
837 return 0;
6b7c5b94
SP
838}
839
93040ae5 840static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405
AK
841 struct sk_buff *skb,
842 bool *skip_hw_vlan)
93040ae5
SK
843{
844 u16 vlan_tag = 0;
845
846 skb = skb_share_check(skb, GFP_ATOMIC);
847 if (unlikely(!skb))
848 return skb;
849
efee8e87 850 if (vlan_tx_tag_present(skb))
93040ae5 851 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
852
853 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
854 if (!vlan_tag)
855 vlan_tag = adapter->pvid;
856 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
857 * skip VLAN insertion
858 */
859 if (skip_hw_vlan)
860 *skip_hw_vlan = true;
861 }
bc0c3405
AK
862
863 if (vlan_tag) {
58717686 864 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
865 if (unlikely(!skb))
866 return skb;
bc0c3405
AK
867 skb->vlan_tci = 0;
868 }
869
870 /* Insert the outer VLAN, if any */
871 if (adapter->qnq_vid) {
872 vlan_tag = adapter->qnq_vid;
58717686 873 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
874 if (unlikely(!skb))
875 return skb;
876 if (skip_hw_vlan)
877 *skip_hw_vlan = true;
878 }
879
93040ae5
SK
880 return skb;
881}
882
bc0c3405
AK
883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
ee9c799c
SP
910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
bc0c3405 912{
ee9c799c 913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
914}
915
ec495fac
VV
916static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
917 struct sk_buff *skb,
918 bool *skip_hw_vlan)
6b7c5b94 919{
d2cb6ce7 920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
ee9c799c
SP
921 unsigned int eth_hdr_len;
922 struct iphdr *ip;
93040ae5 923
1297f9db
AK
924 /* For padded packets, BE HW modifies tot_len field in IP header
925 * incorrecly when VLAN tag is inserted by HW.
3904dcc4 926 * For padded packets, Lancer computes incorrect checksum.
1ded132d 927 */
ee9c799c
SP
928 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
929 VLAN_ETH_HLEN : ETH_HLEN;
3904dcc4
SK
930 if (skb->len <= 60 &&
931 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
ee9c799c 932 is_ipv4_pkt(skb)) {
93040ae5
SK
933 ip = (struct iphdr *)ip_hdr(skb);
934 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
935 }
1ded132d 936
d2cb6ce7 937 /* If vlan tag is already inlined in the packet, skip HW VLAN
f93f160b 938 * tagging in pvid-tagging mode
d2cb6ce7 939 */
f93f160b 940 if (be_pvid_tagging_enabled(adapter) &&
d2cb6ce7 941 veh->h_vlan_proto == htons(ETH_P_8021Q))
ee9c799c 942 *skip_hw_vlan = true;
d2cb6ce7 943
93040ae5
SK
944 /* HW has a bug wherein it will calculate CSUM for VLAN
945 * pkts even though it is disabled.
946 * Manually insert VLAN in pkt.
947 */
948 if (skb->ip_summed != CHECKSUM_PARTIAL &&
ee9c799c
SP
949 vlan_tx_tag_present(skb)) {
950 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
bc0c3405 951 if (unlikely(!skb))
c9128951 952 goto err;
bc0c3405
AK
953 }
954
955 /* HW may lockup when VLAN HW tagging is requested on
956 * certain ipv6 packets. Drop such pkts if the HW workaround to
957 * skip HW tagging is not enabled by FW.
958 */
959 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
ee9c799c
SP
960 (adapter->pvid || adapter->qnq_vid) &&
961 !qnq_async_evt_rcvd(adapter)))
bc0c3405
AK
962 goto tx_drop;
963
964 /* Manual VLAN tag insertion to prevent:
965 * ASIC lockup when the ASIC inserts VLAN tag into
966 * certain ipv6 packets. Insert VLAN tags in driver,
967 * and set event, completion, vlan bits accordingly
968 * in the Tx WRB.
969 */
970 if (be_ipv6_tx_stall_chk(adapter, skb) &&
971 be_vlan_tag_tx_chk(adapter, skb)) {
ee9c799c 972 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
1ded132d 973 if (unlikely(!skb))
c9128951 974 goto err;
1ded132d
AK
975 }
976
ee9c799c
SP
977 return skb;
978tx_drop:
979 dev_kfree_skb_any(skb);
c9128951 980err:
ee9c799c
SP
981 return NULL;
982}
983
ec495fac
VV
984static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
985 struct sk_buff *skb,
986 bool *skip_hw_vlan)
987{
988 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
989 * less may cause a transmit stall on that port. So the work-around is
990 * to pad short packets (<= 32 bytes) to a 36-byte length.
991 */
992 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
993 if (skb_padto(skb, 36))
994 return NULL;
995 skb->len = 36;
996 }
997
998 if (BEx_chip(adapter) || lancer_chip(adapter)) {
999 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1000 if (!skb)
1001 return NULL;
1002 }
1003
1004 return skb;
1005}
1006
ee9c799c
SP
1007static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1008{
1009 struct be_adapter *adapter = netdev_priv(netdev);
1010 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1011 struct be_queue_info *txq = &txo->q;
1012 bool dummy_wrb, stopped = false;
1013 u32 wrb_cnt = 0, copied = 0;
1014 bool skip_hw_vlan = false;
1015 u32 start = txq->head;
1016
1017 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
bc617526
SP
1018 if (!skb) {
1019 tx_stats(txo)->tx_drv_drops++;
ee9c799c 1020 return NETDEV_TX_OK;
bc617526 1021 }
ee9c799c 1022
fe6d2a38 1023 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 1024
bc0c3405
AK
1025 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1026 skip_hw_vlan);
c190e3c8 1027 if (copied) {
cd8f76c0
ED
1028 int gso_segs = skb_shinfo(skb)->gso_segs;
1029
c190e3c8 1030 /* record the sent skb in the sent_skb table */
3c8def97
SP
1031 BUG_ON(txo->sent_skb_list[start]);
1032 txo->sent_skb_list[start] = skb;
c190e3c8
AK
1033
1034 /* Ensure txq has space for the next skb; Else stop the queue
1035 * *BEFORE* ringing the tx doorbell, so that we serialze the
1036 * tx compls of the current transmit which'll wake up the queue
1037 */
7101e111 1038 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
1039 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1040 txq->len) {
3c8def97 1041 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
1042 stopped = true;
1043 }
6b7c5b94 1044
94d73aaa 1045 be_txq_notify(adapter, txo, wrb_cnt);
6b7c5b94 1046
cd8f76c0 1047 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
c190e3c8
AK
1048 } else {
1049 txq->head = start;
bc617526 1050 tx_stats(txo)->tx_drv_drops++;
c190e3c8 1051 dev_kfree_skb_any(skb);
6b7c5b94 1052 }
6b7c5b94
SP
1053 return NETDEV_TX_OK;
1054}
1055
1056static int be_change_mtu(struct net_device *netdev, int new_mtu)
1057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
1059 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
1060 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1061 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
1062 dev_info(&adapter->pdev->dev,
1063 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
1064 BE_MIN_MTU,
1065 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1066 return -EINVAL;
1067 }
1068 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1069 netdev->mtu, new_mtu);
1070 netdev->mtu = new_mtu;
1071 return 0;
1072}
1073
1074/*
82903e4b
AK
1075 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1076 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 1077 */
10329df8 1078static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 1079{
10329df8
SP
1080 u16 vids[BE_NUM_VLANS_SUPPORTED];
1081 u16 num = 0, i;
82903e4b 1082 int status = 0;
1da87b7f 1083
c0e64ef4
SP
1084 /* No need to further configure vids if in promiscuous mode */
1085 if (adapter->promiscuous)
1086 return 0;
1087
92bf14ab 1088 if (adapter->vlans_added > be_max_vlans(adapter))
0fc16ebf
PR
1089 goto set_vlan_promisc;
1090
1091 /* Construct VLAN Table to give to HW */
1092 for (i = 0; i < VLAN_N_VID; i++)
1093 if (adapter->vlan_tag[i])
10329df8 1094 vids[num++] = cpu_to_le16(i);
0fc16ebf
PR
1095
1096 status = be_cmd_vlan_config(adapter, adapter->if_handle,
012bd387 1097 vids, num, 0);
0fc16ebf 1098
0fc16ebf 1099 if (status) {
d9d604f8
AK
1100 /* Set to VLAN promisc mode as setting VLAN filter failed */
1101 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1102 goto set_vlan_promisc;
1103 dev_err(&adapter->pdev->dev,
1104 "Setting HW VLAN filtering failed.\n");
1105 } else {
1106 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1107 /* hw VLAN filtering re-enabled. */
1108 status = be_cmd_rx_filter(adapter,
1109 BE_FLAGS_VLAN_PROMISC, OFF);
1110 if (!status) {
1111 dev_info(&adapter->pdev->dev,
1112 "Disabling VLAN Promiscuous mode.\n");
1113 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
d9d604f8
AK
1114 }
1115 }
6b7c5b94 1116 }
1da87b7f 1117
b31c50a7 1118 return status;
0fc16ebf
PR
1119
1120set_vlan_promisc:
a6b74e01
SK
1121 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1122 return 0;
d9d604f8
AK
1123
1124 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1125 if (!status) {
1126 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
d9d604f8
AK
1127 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1128 } else
1129 dev_err(&adapter->pdev->dev,
1130 "Failed to enable VLAN Promiscuous mode.\n");
0fc16ebf 1131 return status;
6b7c5b94
SP
1132}
1133
80d5c368 1134static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1135{
1136 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1137 int status = 0;
6b7c5b94 1138
a85e9986
PR
1139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 goto ret;
1142
6b7c5b94 1143 adapter->vlan_tag[vid] = 1;
a6b74e01 1144 adapter->vlans_added++;
8e586137 1145
a6b74e01
SK
1146 status = be_vid_config(adapter);
1147 if (status) {
1148 adapter->vlans_added--;
80817cbf 1149 adapter->vlan_tag[vid] = 0;
a6b74e01 1150 }
80817cbf
AK
1151ret:
1152 return status;
6b7c5b94
SP
1153}
1154
80d5c368 1155static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1156{
1157 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1158 int status = 0;
6b7c5b94 1159
a85e9986
PR
1160 /* Packets with VID 0 are always received by Lancer by default */
1161 if (lancer_chip(adapter) && vid == 0)
1162 goto ret;
1163
6b7c5b94 1164 adapter->vlan_tag[vid] = 0;
a6b74e01 1165 status = be_vid_config(adapter);
80817cbf
AK
1166 if (!status)
1167 adapter->vlans_added--;
1168 else
1169 adapter->vlan_tag[vid] = 1;
1170ret:
1171 return status;
6b7c5b94
SP
1172}
1173
7ad09458
S
1174static void be_clear_promisc(struct be_adapter *adapter)
1175{
1176 adapter->promiscuous = false;
1177 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1178
1179 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1180}
1181
a54769f5 1182static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
1183{
1184 struct be_adapter *adapter = netdev_priv(netdev);
0fc16ebf 1185 int status;
6b7c5b94 1186
24307eef 1187 if (netdev->flags & IFF_PROMISC) {
5b8821b7 1188 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
1189 adapter->promiscuous = true;
1190 goto done;
6b7c5b94
SP
1191 }
1192
25985edc 1193 /* BE was previously in promiscuous mode; disable it */
24307eef 1194 if (adapter->promiscuous) {
7ad09458 1195 be_clear_promisc(adapter);
c0e64ef4 1196 if (adapter->vlans_added)
10329df8 1197 be_vid_config(adapter);
6b7c5b94
SP
1198 }
1199
e7b909a6 1200 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 1201 if (netdev->flags & IFF_ALLMULTI ||
92bf14ab 1202 netdev_mc_count(netdev) > be_max_mc(adapter)) {
5b8821b7 1203 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 1204 goto done;
6b7c5b94 1205 }
6b7c5b94 1206
fbc13f01
AK
1207 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1208 struct netdev_hw_addr *ha;
1209 int i = 1; /* First slot is claimed by the Primary MAC */
1210
1211 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1212 be_cmd_pmac_del(adapter, adapter->if_handle,
1213 adapter->pmac_id[i], 0);
1214 }
1215
92bf14ab 1216 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
fbc13f01
AK
1217 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1218 adapter->promiscuous = true;
1219 goto done;
1220 }
1221
1222 netdev_for_each_uc_addr(ha, adapter->netdev) {
1223 adapter->uc_macs++; /* First slot is for Primary MAC */
1224 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1225 adapter->if_handle,
1226 &adapter->pmac_id[adapter->uc_macs], 0);
1227 }
1228 }
1229
0fc16ebf
PR
1230 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1231
1232 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1233 if (status) {
1234 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1235 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1236 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1237 }
24307eef
SP
1238done:
1239 return;
6b7c5b94
SP
1240}
1241
ba343c77
SB
1242static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1243{
1244 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1245 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1246 int status;
1247
11ac75ed 1248 if (!sriov_enabled(adapter))
ba343c77
SB
1249 return -EPERM;
1250
11ac75ed 1251 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1252 return -EINVAL;
1253
3175d8c2
SP
1254 if (BEx_chip(adapter)) {
1255 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1256 vf + 1);
ba343c77 1257
11ac75ed
SP
1258 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1259 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1260 } else {
1261 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1262 vf + 1);
590c391d
PR
1263 }
1264
64600ea5 1265 if (status)
ba343c77
SB
1266 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1267 mac, vf);
64600ea5 1268 else
11ac75ed 1269 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1270
ba343c77
SB
1271 return status;
1272}
1273
64600ea5
AK
1274static int be_get_vf_config(struct net_device *netdev, int vf,
1275 struct ifla_vf_info *vi)
1276{
1277 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1278 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1279
11ac75ed 1280 if (!sriov_enabled(adapter))
64600ea5
AK
1281 return -EPERM;
1282
11ac75ed 1283 if (vf >= adapter->num_vfs)
64600ea5
AK
1284 return -EINVAL;
1285
1286 vi->vf = vf;
11ac75ed 1287 vi->tx_rate = vf_cfg->tx_rate;
a60b3a13
AK
1288 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1290 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1291
1292 return 0;
1293}
1294
1da87b7f
AK
1295static int be_set_vf_vlan(struct net_device *netdev,
1296 int vf, u16 vlan, u8 qos)
1297{
1298 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1299 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1da87b7f
AK
1300 int status = 0;
1301
11ac75ed 1302 if (!sriov_enabled(adapter))
1da87b7f
AK
1303 return -EPERM;
1304
b9fc0e53 1305 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1306 return -EINVAL;
1307
b9fc0e53
AK
1308 if (vlan || qos) {
1309 vlan |= qos << VLAN_PRIO_SHIFT;
c502224e 1310 if (vf_cfg->vlan_tag != vlan)
b9fc0e53
AK
1311 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1312 vf_cfg->if_handle, 0);
1da87b7f 1313 } else {
f1f3ee1b 1314 /* Reset Transparent Vlan Tagging. */
c502224e
SK
1315 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1316 vf + 1, vf_cfg->if_handle, 0);
1da87b7f
AK
1317 }
1318
c502224e
SK
1319 if (!status)
1320 vf_cfg->vlan_tag = vlan;
1321 else
1da87b7f 1322 dev_info(&adapter->pdev->dev,
c502224e 1323 "VLAN %d config on VF %d failed\n", vlan, vf);
1da87b7f
AK
1324 return status;
1325}
1326
e1d18735
AK
1327static int be_set_vf_tx_rate(struct net_device *netdev,
1328 int vf, int rate)
1329{
1330 struct be_adapter *adapter = netdev_priv(netdev);
1331 int status = 0;
1332
11ac75ed 1333 if (!sriov_enabled(adapter))
e1d18735
AK
1334 return -EPERM;
1335
94f434c2 1336 if (vf >= adapter->num_vfs)
e1d18735
AK
1337 return -EINVAL;
1338
94f434c2
AK
1339 if (rate < 100 || rate > 10000) {
1340 dev_err(&adapter->pdev->dev,
1341 "tx rate must be between 100 and 10000 Mbps\n");
1342 return -EINVAL;
1343 }
e1d18735 1344
d5c18473
PR
1345 if (lancer_chip(adapter))
1346 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1347 else
1348 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1349
1350 if (status)
94f434c2 1351 dev_err(&adapter->pdev->dev,
e1d18735 1352 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1353 else
1354 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1355 return status;
1356}
1357
2632bafd
SP
1358static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1359 ulong now)
6b7c5b94 1360{
2632bafd
SP
1361 aic->rx_pkts_prev = rx_pkts;
1362 aic->tx_reqs_prev = tx_pkts;
1363 aic->jiffies = now;
1364}
ac124ff9 1365
2632bafd
SP
1366static void be_eqd_update(struct be_adapter *adapter)
1367{
1368 struct be_set_eqd set_eqd[MAX_EVT_QS];
1369 int eqd, i, num = 0, start;
1370 struct be_aic_obj *aic;
1371 struct be_eq_obj *eqo;
1372 struct be_rx_obj *rxo;
1373 struct be_tx_obj *txo;
1374 u64 rx_pkts, tx_pkts;
1375 ulong now;
1376 u32 pps, delta;
10ef9ab4 1377
2632bafd
SP
1378 for_all_evt_queues(adapter, eqo, i) {
1379 aic = &adapter->aic_obj[eqo->idx];
1380 if (!aic->enable) {
1381 if (aic->jiffies)
1382 aic->jiffies = 0;
1383 eqd = aic->et_eqd;
1384 goto modify_eqd;
1385 }
6b7c5b94 1386
2632bafd
SP
1387 rxo = &adapter->rx_obj[eqo->idx];
1388 do {
1389 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1390 rx_pkts = rxo->stats.rx_pkts;
1391 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
10ef9ab4 1392
2632bafd
SP
1393 txo = &adapter->tx_obj[eqo->idx];
1394 do {
1395 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1396 tx_pkts = txo->stats.tx_reqs;
1397 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
6b7c5b94 1398
6b7c5b94 1399
2632bafd
SP
1400 /* Skip, if wrapped around or first calculation */
1401 now = jiffies;
1402 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1403 rx_pkts < aic->rx_pkts_prev ||
1404 tx_pkts < aic->tx_reqs_prev) {
1405 be_aic_update(aic, rx_pkts, tx_pkts, now);
1406 continue;
1407 }
1408
1409 delta = jiffies_to_msecs(now - aic->jiffies);
1410 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1411 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1412 eqd = (pps / 15000) << 2;
10ef9ab4 1413
2632bafd
SP
1414 if (eqd < 8)
1415 eqd = 0;
1416 eqd = min_t(u32, eqd, aic->max_eqd);
1417 eqd = max_t(u32, eqd, aic->min_eqd);
1418
1419 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1420modify_eqd:
2632bafd
SP
1421 if (eqd != aic->prev_eqd) {
1422 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1423 set_eqd[num].eq_id = eqo->q.id;
1424 aic->prev_eqd = eqd;
1425 num++;
1426 }
ac124ff9 1427 }
2632bafd
SP
1428
1429 if (num)
1430 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1431}
1432
3abcdeda 1433static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1434 struct be_rx_compl_info *rxcp)
4097f663 1435{
ac124ff9 1436 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1437
ab1594e9 1438 u64_stats_update_begin(&stats->sync);
3abcdeda 1439 stats->rx_compl++;
2e588f84 1440 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1441 stats->rx_pkts++;
2e588f84 1442 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1443 stats->rx_mcast_pkts++;
2e588f84 1444 if (rxcp->err)
ac124ff9 1445 stats->rx_compl_err++;
ab1594e9 1446 u64_stats_update_end(&stats->sync);
4097f663
SP
1447}
1448
2e588f84 1449static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1450{
19fad86f
PR
1451 /* L4 checksum is not reliable for non TCP/UDP packets.
1452 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1453 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1454 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1455}
1456
0b0ef1d0 1457static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1458{
10ef9ab4 1459 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1460 struct be_rx_page_info *rx_page_info;
3abcdeda 1461 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1462 u16 frag_idx = rxq->tail;
6b7c5b94 1463
3abcdeda 1464 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1465 BUG_ON(!rx_page_info->page);
1466
e50287be 1467 if (rx_page_info->last_frag) {
2b7bcebf
IV
1468 dma_unmap_page(&adapter->pdev->dev,
1469 dma_unmap_addr(rx_page_info, bus),
1470 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1471 rx_page_info->last_frag = false;
1472 } else {
1473 dma_sync_single_for_cpu(&adapter->pdev->dev,
1474 dma_unmap_addr(rx_page_info, bus),
1475 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1476 }
6b7c5b94 1477
0b0ef1d0 1478 queue_tail_inc(rxq);
6b7c5b94
SP
1479 atomic_dec(&rxq->used);
1480 return rx_page_info;
1481}
1482
1483/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1484static void be_rx_compl_discard(struct be_rx_obj *rxo,
1485 struct be_rx_compl_info *rxcp)
6b7c5b94 1486{
6b7c5b94 1487 struct be_rx_page_info *page_info;
2e588f84 1488 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1489
e80d9da6 1490 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1491 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1492 put_page(page_info->page);
1493 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1494 }
1495}
1496
1497/*
1498 * skb_fill_rx_data forms a complete skb for an ether frame
1499 * indicated by rxcp.
1500 */
10ef9ab4
SP
1501static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1502 struct be_rx_compl_info *rxcp)
6b7c5b94 1503{
6b7c5b94 1504 struct be_rx_page_info *page_info;
2e588f84
SP
1505 u16 i, j;
1506 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1507 u8 *start;
6b7c5b94 1508
0b0ef1d0 1509 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1510 start = page_address(page_info->page) + page_info->page_offset;
1511 prefetch(start);
1512
1513 /* Copy data in the first descriptor of this completion */
2e588f84 1514 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1515
6b7c5b94
SP
1516 skb->len = curr_frag_len;
1517 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1518 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1519 /* Complete packet has now been moved to data */
1520 put_page(page_info->page);
1521 skb->data_len = 0;
1522 skb->tail += curr_frag_len;
1523 } else {
ac1ae5f3
ED
1524 hdr_len = ETH_HLEN;
1525 memcpy(skb->data, start, hdr_len);
6b7c5b94 1526 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1527 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1528 skb_shinfo(skb)->frags[0].page_offset =
1529 page_info->page_offset + hdr_len;
9e903e08 1530 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1531 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1532 skb->truesize += rx_frag_size;
6b7c5b94
SP
1533 skb->tail += hdr_len;
1534 }
205859a2 1535 page_info->page = NULL;
6b7c5b94 1536
2e588f84
SP
1537 if (rxcp->pkt_size <= rx_frag_size) {
1538 BUG_ON(rxcp->num_rcvd != 1);
1539 return;
6b7c5b94
SP
1540 }
1541
1542 /* More frags present for this completion */
2e588f84
SP
1543 remaining = rxcp->pkt_size - curr_frag_len;
1544 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1545 page_info = get_rx_page_info(rxo);
2e588f84 1546 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1547
bd46cb6c
AK
1548 /* Coalesce all frags from the same physical page in one slot */
1549 if (page_info->page_offset == 0) {
1550 /* Fresh page */
1551 j++;
b061b39e 1552 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1553 skb_shinfo(skb)->frags[j].page_offset =
1554 page_info->page_offset;
9e903e08 1555 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1556 skb_shinfo(skb)->nr_frags++;
1557 } else {
1558 put_page(page_info->page);
1559 }
1560
9e903e08 1561 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1562 skb->len += curr_frag_len;
1563 skb->data_len += curr_frag_len;
bdb28a97 1564 skb->truesize += rx_frag_size;
2e588f84 1565 remaining -= curr_frag_len;
205859a2 1566 page_info->page = NULL;
6b7c5b94 1567 }
bd46cb6c 1568 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1569}
1570
5be93b9a 1571/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1572static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1573 struct be_rx_compl_info *rxcp)
6b7c5b94 1574{
10ef9ab4 1575 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1576 struct net_device *netdev = adapter->netdev;
6b7c5b94 1577 struct sk_buff *skb;
89420424 1578
bb349bb4 1579 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1580 if (unlikely(!skb)) {
ac124ff9 1581 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1582 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1583 return;
1584 }
1585
10ef9ab4 1586 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1587
6332c8d3 1588 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1590 else
1591 skb_checksum_none_assert(skb);
6b7c5b94 1592
6332c8d3 1593 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1594 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1595 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1596 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
6384a4d0 1597 skb_mark_napi_id(skb, napi);
6b7c5b94 1598
343e43c0 1599 if (rxcp->vlanf)
86a9bad3 1600 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1601
1602 netif_receive_skb(skb);
6b7c5b94
SP
1603}
1604
5be93b9a 1605/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1606static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1607 struct napi_struct *napi,
1608 struct be_rx_compl_info *rxcp)
6b7c5b94 1609{
10ef9ab4 1610 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1611 struct be_rx_page_info *page_info;
5be93b9a 1612 struct sk_buff *skb = NULL;
2e588f84
SP
1613 u16 remaining, curr_frag_len;
1614 u16 i, j;
3968fa1e 1615
10ef9ab4 1616 skb = napi_get_frags(napi);
5be93b9a 1617 if (!skb) {
10ef9ab4 1618 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1619 return;
1620 }
1621
2e588f84
SP
1622 remaining = rxcp->pkt_size;
1623 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1624 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1625
1626 curr_frag_len = min(remaining, rx_frag_size);
1627
bd46cb6c
AK
1628 /* Coalesce all frags from the same physical page in one slot */
1629 if (i == 0 || page_info->page_offset == 0) {
1630 /* First frag or Fresh page */
1631 j++;
b061b39e 1632 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1633 skb_shinfo(skb)->frags[j].page_offset =
1634 page_info->page_offset;
9e903e08 1635 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1636 } else {
1637 put_page(page_info->page);
1638 }
9e903e08 1639 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1640 skb->truesize += rx_frag_size;
bd46cb6c 1641 remaining -= curr_frag_len;
6b7c5b94
SP
1642 memset(page_info, 0, sizeof(*page_info));
1643 }
bd46cb6c 1644 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1645
5be93b9a 1646 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1647 skb->len = rxcp->pkt_size;
1648 skb->data_len = rxcp->pkt_size;
5be93b9a 1649 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1650 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1651 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1652 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
6384a4d0 1653 skb_mark_napi_id(skb, napi);
5be93b9a 1654
343e43c0 1655 if (rxcp->vlanf)
86a9bad3 1656 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1657
10ef9ab4 1658 napi_gro_frags(napi);
2e588f84
SP
1659}
1660
10ef9ab4
SP
1661static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1662 struct be_rx_compl_info *rxcp)
2e588f84
SP
1663{
1664 rxcp->pkt_size =
1665 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1666 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1667 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1668 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1669 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1670 rxcp->ip_csum =
1671 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1672 rxcp->l4_csum =
1673 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1674 rxcp->ipv6 =
1675 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1676 rxcp->num_rcvd =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1678 rxcp->pkt_type =
1679 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1680 rxcp->rss_hash =
c297977e 1681 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1682 if (rxcp->vlanf) {
f93f160b 1683 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f
DM
1684 compl);
1685 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1686 compl);
15d72184 1687 }
12004ae9 1688 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1689}
1690
10ef9ab4
SP
1691static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1692 struct be_rx_compl_info *rxcp)
2e588f84
SP
1693{
1694 rxcp->pkt_size =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1696 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1697 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1698 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1699 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1700 rxcp->ip_csum =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1702 rxcp->l4_csum =
1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1704 rxcp->ipv6 =
1705 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1706 rxcp->num_rcvd =
1707 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1708 rxcp->pkt_type =
1709 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1710 rxcp->rss_hash =
c297977e 1711 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1712 if (rxcp->vlanf) {
f93f160b 1713 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f
DM
1714 compl);
1715 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1716 compl);
15d72184 1717 }
12004ae9 1718 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1719 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1720 ip_frag, compl);
2e588f84
SP
1721}
1722
/* Pop the next RX completion off rxo's CQ, parse it into rxo->rxcp and
 * return it; returns NULL when no valid completion is pending.
 * The returned pointer is rxo's single scratch rxcp slot — it is overwritten
 * by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the rest of the entry until the valid bit
	 * has been observed set (DMA'd by hardware). */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native and older chips use different completion layouts */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* On BEx the vlan tag arrives byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-default (pvid) tag unless the host explicitly
		 * configured that vlan id */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1767
1829b086 1768static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1769{
6b7c5b94 1770 u32 order = get_order(size);
1829b086 1771
6b7c5b94 1772 if (order > 0)
1829b086
ED
1773 gfp |= __GFP_COMP;
1774 return alloc_pages(gfp, order);
6b7c5b94
SP
1775}
1776
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each "big page" is DMA-mapped once; only the page_info that holds the last
 * fragment of a page records the page's base DMA address (last_frag = true)
 * so the unmap happens exactly once when that fragment is consumed.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Stop at MAX_RX_POST or at the first slot still owned by HW
	 * (page != NULL means the descriptor has not been reaped yet) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next fragment out of the current page;
			 * each fragment holds its own page reference */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1855
5fb379ee 1856static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1857{
6b7c5b94
SP
1858 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1859
1860 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1861 return NULL;
1862
f3eb62d2 1863 rmb();
6b7c5b94
SP
1864 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1865
1866 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1867
1868 queue_tail_inc(tx_cq);
1869 return txcp;
1870}
1871
/* Reap the completed TX request ending at wrb index @last_index: unmap every
 * fragment, free the skb, and return the number of WRBs consumed (including
 * the header wrb). Advances txq->tail past the request.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is tracked at the slot of its header wrb (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may map the skb's linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1903
10ef9ab4
SP
1904/* Return the number of events in the event queue */
1905static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1906{
10ef9ab4
SP
1907 struct be_eq_entry *eqe;
1908 int num = 0;
859b1e4e 1909
10ef9ab4
SP
1910 do {
1911 eqe = queue_tail_node(&eqo->q);
1912 if (eqe->evt == 0)
1913 break;
859b1e4e 1914
10ef9ab4
SP
1915 rmb();
1916 eqe->evt = 0;
1917 num++;
1918 queue_tail_inc(&eqo->q);
1919 } while (true);
1920
1921 return num;
859b1e4e
SP
1922}
1923
10ef9ab4
SP
1924/* Leaves the EQ is disarmed state */
1925static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1926{
10ef9ab4 1927 int num = events_get(eqo);
859b1e4e 1928
10ef9ab4 1929 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1930}
1931
10ef9ab4 1932static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1933{
1934 struct be_rx_page_info *page_info;
3abcdeda
SP
1935 struct be_queue_info *rxq = &rxo->q;
1936 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1937 struct be_rx_compl_info *rxcp;
d23e946c
SP
1938 struct be_adapter *adapter = rxo->adapter;
1939 int flush_wait = 0;
6b7c5b94 1940
d23e946c
SP
1941 /* Consume pending rx completions.
1942 * Wait for the flush completion (identified by zero num_rcvd)
1943 * to arrive. Notify CQ even when there are no more CQ entries
1944 * for HW to flush partially coalesced CQ entries.
1945 * In Lancer, there is no need to wait for flush compl.
1946 */
1947 for (;;) {
1948 rxcp = be_rx_compl_get(rxo);
1949 if (rxcp == NULL) {
1950 if (lancer_chip(adapter))
1951 break;
1952
1953 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1954 dev_warn(&adapter->pdev->dev,
1955 "did not receive flush compl\n");
1956 break;
1957 }
1958 be_cq_notify(adapter, rx_cq->id, true, 0);
1959 mdelay(1);
1960 } else {
1961 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1962 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1963 if (rxcp->num_rcvd == 0)
1964 break;
1965 }
6b7c5b94
SP
1966 }
1967
d23e946c
SP
1968 /* After cleanup, leave the CQ in unarmed state */
1969 be_cq_notify(adapter, rx_cq->id, false, 0);
1970
1971 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
1972 while (atomic_read(&rxq->used) > 0) {
1973 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1974 put_page(page_info->page);
1975 memset(page_info, 0, sizeof(*page_info));
1976 }
1977 BUG_ON(atomic_read(&rxq->used));
482c9e79 1978 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1979}
1980
0ae57bb3 1981static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1982{
0ae57bb3
SP
1983 struct be_tx_obj *txo;
1984 struct be_queue_info *txq;
a8e9179a 1985 struct be_eth_tx_compl *txcp;
4d586b82 1986 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1987 struct sk_buff *sent_skb;
1988 bool dummy_wrb;
0ae57bb3 1989 int i, pending_txqs;
a8e9179a
SP
1990
1991 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1992 do {
0ae57bb3
SP
1993 pending_txqs = adapter->num_tx_qs;
1994
1995 for_all_tx_queues(adapter, txo, i) {
1996 txq = &txo->q;
1997 while ((txcp = be_tx_compl_get(&txo->cq))) {
1998 end_idx =
1999 AMAP_GET_BITS(struct amap_eth_tx_compl,
2000 wrb_index, txcp);
2001 num_wrbs += be_tx_compl_process(adapter, txo,
2002 end_idx);
2003 cmpl++;
2004 }
2005 if (cmpl) {
2006 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2007 atomic_sub(num_wrbs, &txq->used);
2008 cmpl = 0;
2009 num_wrbs = 0;
2010 }
2011 if (atomic_read(&txq->used) == 0)
2012 pending_txqs--;
a8e9179a
SP
2013 }
2014
0ae57bb3 2015 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
2016 break;
2017
2018 mdelay(1);
2019 } while (true);
2020
0ae57bb3
SP
2021 for_all_tx_queues(adapter, txo, i) {
2022 txq = &txo->q;
2023 if (atomic_read(&txq->used))
2024 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2025 atomic_read(&txq->used));
2026
2027 /* free posted tx for which compls will never arrive */
2028 while (atomic_read(&txq->used)) {
2029 sent_skb = txo->sent_skb_list[txq->tail];
2030 end_idx = txq->tail;
2031 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2032 &dummy_wrb);
2033 index_adv(&end_idx, num_wrbs - 1, txq->len);
2034 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2035 atomic_sub(num_wrbs, &txq->used);
2036 }
b03388d6 2037 }
6b7c5b94
SP
2038}
2039
10ef9ab4
SP
/* Tear down all event queues: drain events, destroy the FW queue object,
 * unhook NAPI, and free the host-side ring memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Ring memory is freed even if FW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2055
/* Create one event queue per usable IRQ (capped by the configured queue
 * count), registering a NAPI context and adaptive-interrupt (AIC) state for
 * each. Returns 0 on success or a negative errno from allocation/FW cmd.
 * On failure, partially created EQs are left for the destroy path to clean.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Start with adaptive interrupt coalescing enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2089
5fb379ee
SP
2090static void be_mcc_queues_destroy(struct be_adapter *adapter)
2091{
2092 struct be_queue_info *q;
5fb379ee 2093
8788fdc2 2094 q = &adapter->mcc_obj.q;
5fb379ee 2095 if (q->created)
8788fdc2 2096 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2097 be_queue_free(adapter, q);
2098
8788fdc2 2099 q = &adapter->mcc_obj.cq;
5fb379ee 2100 if (q->created)
8788fdc2 2101 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2102 be_queue_free(adapter, q);
2103}
2104
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and then the MCC queue on top of it;
 * unwinds in reverse order via the goto chain on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2137
6b7c5b94
SP
/* Destroy every TX queue and its completion queue (if created) and release
 * the host-side ring memory for both.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2156
7707133c 2157static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2158{
10ef9ab4 2159 struct be_queue_info *cq, *eq;
3c8def97 2160 struct be_tx_obj *txo;
92bf14ab 2161 int status, i;
6b7c5b94 2162
92bf14ab 2163 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2164
10ef9ab4
SP
2165 for_all_tx_queues(adapter, txo, i) {
2166 cq = &txo->cq;
2167 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2168 sizeof(struct be_eth_tx_compl));
2169 if (status)
2170 return status;
3c8def97 2171
827da44c
JS
2172 u64_stats_init(&txo->stats.sync);
2173 u64_stats_init(&txo->stats.sync_compl);
2174
10ef9ab4
SP
2175 /* If num_evt_qs is less than num_tx_qs, then more than
2176 * one txq share an eq
2177 */
2178 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2179 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2180 if (status)
2181 return status;
6b7c5b94 2182
10ef9ab4
SP
2183 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2184 sizeof(struct be_eth_wrb));
2185 if (status)
2186 return status;
6b7c5b94 2187
94d73aaa 2188 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2189 if (status)
2190 return status;
3c8def97 2191 }
6b7c5b94 2192
d379142b
SP
2193 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2194 adapter->num_tx_qs);
10ef9ab4 2195 return 0;
6b7c5b94
SP
2196}
2197
10ef9ab4 2198static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2199{
2200 struct be_queue_info *q;
3abcdeda
SP
2201 struct be_rx_obj *rxo;
2202 int i;
2203
2204 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2205 q = &rxo->cq;
2206 if (q->created)
2207 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2208 be_queue_free(adapter, q);
ac6a0c4a
SP
2209 }
2210}
2211
10ef9ab4 2212static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2213{
10ef9ab4 2214 struct be_queue_info *eq, *cq;
3abcdeda
SP
2215 struct be_rx_obj *rxo;
2216 int rc, i;
6b7c5b94 2217
92bf14ab
SP
2218 /* We can create as many RSS rings as there are EQs. */
2219 adapter->num_rx_qs = adapter->num_evt_qs;
2220
2221 /* We'll use RSS only if atleast 2 RSS rings are supported.
2222 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2223 */
92bf14ab
SP
2224 if (adapter->num_rx_qs > 1)
2225 adapter->num_rx_qs++;
2226
6b7c5b94 2227 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2228 for_all_rx_queues(adapter, rxo, i) {
2229 rxo->adapter = adapter;
3abcdeda
SP
2230 cq = &rxo->cq;
2231 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2232 sizeof(struct be_eth_rx_compl));
2233 if (rc)
10ef9ab4 2234 return rc;
3abcdeda 2235
827da44c 2236 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2237 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2238 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2239 if (rc)
10ef9ab4 2240 return rc;
3abcdeda 2241 }
6b7c5b94 2242
d379142b
SP
2243 dev_info(&adapter->pdev->dev,
2244 "created %d RSS queue(s) and 1 default RX queue\n",
2245 adapter->num_rx_qs - 1);
10ef9ab4 2246 return 0;
b628bde2
SP
2247}
2248
6b7c5b94
SP
/* Legacy INTx interrupt handler: count pending events, hand processing off
 * to NAPI, and notify the EQ. Tracks spurious interrupts so repeated
 * event-less IRQs eventually return IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2280
10ef9ab4 2281static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2282{
10ef9ab4 2283 struct be_eq_obj *eqo = dev;
6b7c5b94 2284
0b545a62
SP
2285 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2286 napi_schedule(&eqo->napi);
6b7c5b94
SP
2287 return IRQ_HANDLED;
2288}
2289
2e588f84 2290static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2291{
e38b1706 2292 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2293}
2294
10ef9ab4 2295static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2296 int budget, int polling)
6b7c5b94 2297{
3abcdeda
SP
2298 struct be_adapter *adapter = rxo->adapter;
2299 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2300 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2301 u32 work_done;
2302
2303 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2304 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2305 if (!rxcp)
2306 break;
2307
12004ae9
SP
2308 /* Is it a flush compl that has no data */
2309 if (unlikely(rxcp->num_rcvd == 0))
2310 goto loop_continue;
2311
2312 /* Discard compl with partial DMA Lancer B0 */
2313 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2314 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2315 goto loop_continue;
2316 }
2317
2318 /* On BE drop pkts that arrive due to imperfect filtering in
2319 * promiscuous mode on some skews
2320 */
2321 if (unlikely(rxcp->port != adapter->port_num &&
2322 !lancer_chip(adapter))) {
10ef9ab4 2323 be_rx_compl_discard(rxo, rxcp);
12004ae9 2324 goto loop_continue;
64642811 2325 }
009dd872 2326
6384a4d0
SP
2327 /* Don't do gro when we're busy_polling */
2328 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2329 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2330 else
6384a4d0
SP
2331 be_rx_compl_process(rxo, napi, rxcp);
2332
12004ae9 2333loop_continue:
2e588f84 2334 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2335 }
2336
10ef9ab4
SP
2337 if (work_done) {
2338 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2339
6384a4d0
SP
2340 /* When an rx-obj gets into post_starved state, just
2341 * let be_worker do the posting.
2342 */
2343 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2344 !rxo->rx_post_starved)
10ef9ab4 2345 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2346 }
10ef9ab4 2347
6b7c5b94
SP
2348 return work_done;
2349}
2350
10ef9ab4
SP
/* Reap up to @budget TX completions from @txo's CQ, freeing skbs and waking
 * netdev subqueue @idx when enough WRB space is available again.
 * Returns true when all pending work fit in the budget ("done"),
 * false when the budget was exhausted.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		/* Ack consumed entries and re-arm the CQ */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2383
68d7bdcb 2384int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2385{
2386 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2387 struct be_adapter *adapter = eqo->adapter;
0b545a62 2388 int max_work = 0, work, i, num_evts;
6384a4d0 2389 struct be_rx_obj *rxo;
10ef9ab4 2390 bool tx_done;
f31e50a8 2391
0b545a62
SP
2392 num_evts = events_get(eqo);
2393
10ef9ab4
SP
2394 /* Process all TXQs serviced by this EQ */
2395 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2396 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2397 eqo->tx_budget, i);
2398 if (!tx_done)
2399 max_work = budget;
f31e50a8
SP
2400 }
2401
6384a4d0
SP
2402 if (be_lock_napi(eqo)) {
2403 /* This loop will iterate twice for EQ0 in which
2404 * completions of the last RXQ (default one) are also processed
2405 * For other EQs the loop iterates only once
2406 */
2407 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2408 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2409 max_work = max(work, max_work);
2410 }
2411 be_unlock_napi(eqo);
2412 } else {
2413 max_work = budget;
10ef9ab4 2414 }
6b7c5b94 2415
10ef9ab4
SP
2416 if (is_mcc_eqo(eqo))
2417 be_process_mcc(adapter);
93c86700 2418
10ef9ab4
SP
2419 if (max_work < budget) {
2420 napi_complete(napi);
0b545a62 2421 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2422 } else {
2423 /* As we'll continue in polling mode, count and clear events */
0b545a62 2424 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2425 }
10ef9ab4 2426 return max_work;
6b7c5b94
SP
2427}
2428
6384a4d0
SP
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency sockets) handler: poll each RX queue on this EQ
 * for a small batch of completions. Returns LL_FLUSH_BUSY if NAPI currently
 * owns the queues, otherwise the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* small fixed budget of 4 completions per queue */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2450
f67ef7ba 2451void be_detect_error(struct be_adapter *adapter)
7c185276 2452{
e1cfb67a
PR
2453 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2454 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2455 u32 i;
eb0eecc1
SK
2456 bool error_detected = false;
2457 struct device *dev = &adapter->pdev->dev;
2458 struct net_device *netdev = adapter->netdev;
7c185276 2459
d23e946c 2460 if (be_hw_error(adapter))
72f02485
SP
2461 return;
2462
e1cfb67a
PR
2463 if (lancer_chip(adapter)) {
2464 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466 sliport_err1 = ioread32(adapter->db +
2467 SLIPORT_ERROR1_OFFSET);
2468 sliport_err2 = ioread32(adapter->db +
2469 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2470 adapter->hw_error = true;
2471 /* Do not log error messages if its a FW reset */
2472 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2473 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2474 dev_info(dev, "Firmware update in progress\n");
2475 } else {
2476 error_detected = true;
2477 dev_err(dev, "Error detected in the card\n");
2478 dev_err(dev, "ERR: sliport status 0x%x\n",
2479 sliport_status);
2480 dev_err(dev, "ERR: sliport error1 0x%x\n",
2481 sliport_err1);
2482 dev_err(dev, "ERR: sliport error2 0x%x\n",
2483 sliport_err2);
2484 }
e1cfb67a
PR
2485 }
2486 } else {
2487 pci_read_config_dword(adapter->pdev,
2488 PCICFG_UE_STATUS_LOW, &ue_lo);
2489 pci_read_config_dword(adapter->pdev,
2490 PCICFG_UE_STATUS_HIGH, &ue_hi);
2491 pci_read_config_dword(adapter->pdev,
2492 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2493 pci_read_config_dword(adapter->pdev,
2494 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2495
f67ef7ba
PR
2496 ue_lo = (ue_lo & ~ue_lo_mask);
2497 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2498
eb0eecc1
SK
2499 /* On certain platforms BE hardware can indicate spurious UEs.
2500 * Allow HW to stop working completely in case of a real UE.
2501 * Hence not setting the hw_error for UE detection.
2502 */
f67ef7ba 2503
eb0eecc1
SK
2504 if (ue_lo || ue_hi) {
2505 error_detected = true;
2506 dev_err(dev,
2507 "Unrecoverable Error detected in the adapter");
2508 dev_err(dev, "Please reboot server to recover");
2509 if (skyhawk_chip(adapter))
2510 adapter->hw_error = true;
2511 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2512 if (ue_lo & 1)
2513 dev_err(dev, "UE: %s bit set\n",
2514 ue_status_low_desc[i]);
2515 }
2516 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2517 if (ue_hi & 1)
2518 dev_err(dev, "UE: %s bit set\n",
2519 ue_status_hi_desc[i]);
2520 }
7c185276
AK
2521 }
2522 }
eb0eecc1
SK
2523 if (error_detected)
2524 netif_carrier_off(netdev);
7c185276
AK
2525}
2526
8d56ff11
SP
2527static void be_msix_disable(struct be_adapter *adapter)
2528{
ac6a0c4a 2529 if (msix_enabled(adapter)) {
8d56ff11 2530 pci_disable_msix(adapter->pdev);
ac6a0c4a 2531 adapter->num_msix_vec = 0;
68d7bdcb 2532 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2533 }
2534}
2535
c2bba3df 2536static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2537{
7dc4c064 2538 int i, num_vec;
d379142b 2539 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2540
92bf14ab
SP
2541 /* If RoCE is supported, program the max number of NIC vectors that
2542 * may be configured via set-channels, along with vectors needed for
2543 * RoCe. Else, just program the number we'll use initially.
2544 */
2545 if (be_roce_supported(adapter))
2546 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2547 2 * num_online_cpus());
2548 else
2549 num_vec = adapter->cfg_num_qs;
3abcdeda 2550
ac6a0c4a 2551 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2552 adapter->msix_entries[i].entry = i;
2553
7dc4c064
AG
2554 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2555 MIN_MSIX_VECTORS, num_vec);
2556 if (num_vec < 0)
2557 goto fail;
92bf14ab 2558
92bf14ab
SP
2559 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2560 adapter->num_msix_roce_vec = num_vec / 2;
2561 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2562 adapter->num_msix_roce_vec);
2563 }
2564
2565 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2566
2567 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2568 adapter->num_msix_vec);
c2bba3df 2569 return 0;
7dc4c064
AG
2570
2571fail:
2572 dev_warn(dev, "MSIx enable failed\n");
2573
2574 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2575 if (!be_physfn(adapter))
2576 return num_vec;
2577 return 0;
6b7c5b94
SP
2578}
2579
fe6d2a38 2580static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2581 struct be_eq_obj *eqo)
b628bde2 2582{
f2f781a7 2583 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2584}
6b7c5b94 2585
b628bde2
SP
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees every IRQ registered so far, disables MSI-x and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2609
/* Register interrupt handler(s): MSI-x when enabled, otherwise fall back to
 * shared INTx on the first EQ (PFs only — VFs have no INTx). Sets
 * adapter->isr_registered on success and returns 0, else an errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2637
2638static void be_irq_unregister(struct be_adapter *adapter)
2639{
2640 struct net_device *netdev = adapter->netdev;
10ef9ab4 2641 struct be_eq_obj *eqo;
3abcdeda 2642 int i;
6b7c5b94
SP
2643
2644 if (!adapter->isr_registered)
2645 return;
2646
2647 /* INTx */
ac6a0c4a 2648 if (!msix_enabled(adapter)) {
e49cc34f 2649 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2650 goto done;
2651 }
2652
2653 /* MSIx */
10ef9ab4
SP
2654 for_all_evt_queues(adapter, eqo, i)
2655 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2656
6b7c5b94
SP
2657done:
2658 adapter->isr_registered = false;
6b7c5b94
SP
2659}
2660
10ef9ab4 2661static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2662{
2663 struct be_queue_info *q;
2664 struct be_rx_obj *rxo;
2665 int i;
2666
2667 for_all_rx_queues(adapter, rxo, i) {
2668 q = &rxo->q;
2669 if (q->created) {
2670 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2671 be_rx_cq_clean(rxo);
482c9e79 2672 }
10ef9ab4 2673 be_queue_free(adapter, q);
482c9e79
SP
2674 }
2675}
2676
889cd4b2
SP
/* ndo_stop handler: quiesce RoCE, NAPI and MCC, drain TX completions,
 * tear down RX queues, remove extra unicast MACs, sync and drain every EQ,
 * and finally release the IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Remove the additional unicast MACs (index 0 is the primary MAC) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Wait for any in-flight interrupt handler to finish
		 * before draining the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2720
10ef9ab4 2721static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2722{
2723 struct be_rx_obj *rxo;
e9008ee9
PR
2724 int rc, i, j;
2725 u8 rsstable[128];
482c9e79
SP
2726
2727 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2728 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2729 sizeof(struct be_eth_rx_d));
2730 if (rc)
2731 return rc;
2732 }
2733
2734 /* The FW would like the default RXQ to be created first */
2735 rxo = default_rxo(adapter);
2736 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2737 adapter->if_handle, false, &rxo->rss_id);
2738 if (rc)
2739 return rc;
2740
2741 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2742 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2743 rx_frag_size, adapter->if_handle,
2744 true, &rxo->rss_id);
482c9e79
SP
2745 if (rc)
2746 return rc;
2747 }
2748
2749 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2750 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2751 for_all_rss_queues(adapter, rxo, i) {
2752 if ((j + i) >= 128)
2753 break;
2754 rsstable[j + i] = rxo->rss_id;
2755 }
2756 }
594ad54a
SR
2757 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2758 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2759
2760 if (!BEx_chip(adapter))
2761 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2762 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2763 } else {
2764 /* Disable RSS, if only default RX Q is created */
2765 adapter->rss_flags = RSS_ENABLE_NONE;
2766 }
594ad54a 2767
da1388d6
VV
2768 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2769 128);
2770 if (rc) {
2771 adapter->rss_flags = RSS_ENABLE_NONE;
2772 return rc;
482c9e79
SP
2773 }
2774
2775 /* First time posting */
10ef9ab4 2776 for_all_rx_queues(adapter, rxo, i)
482c9e79 2777 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2778 return 0;
2779}
2780
6b7c5b94
SP
/* ndo_open: bring up the data path — create RX queues, register IRQs,
 * arm completion queues, enable async MCC and NAPI, then query link state
 * and start the TX queues. On any failure the partially-initialized state
 * is unwound via be_close() and -EIO is returned (the specific error
 * status is intentionally not propagated here).
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm (re-enable) all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	/* Flag lets be_close() know NAPI must be torn down */
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2824
71d8d1b5
AK
2825static int be_setup_wol(struct be_adapter *adapter, bool enable)
2826{
2827 struct be_dma_mem cmd;
2828 int status = 0;
2829 u8 mac[ETH_ALEN];
2830
2831 memset(mac, 0, ETH_ALEN);
2832
2833 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2834 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2835 GFP_KERNEL);
71d8d1b5
AK
2836 if (cmd.va == NULL)
2837 return -1;
71d8d1b5
AK
2838
2839 if (enable) {
2840 status = pci_write_config_dword(adapter->pdev,
2841 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2842 if (status) {
2843 dev_err(&adapter->pdev->dev,
2381a55c 2844 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2845 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2846 cmd.dma);
71d8d1b5
AK
2847 return status;
2848 }
2849 status = be_cmd_enable_magic_wol(adapter,
2850 adapter->netdev->dev_addr, &cmd);
2851 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2852 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2853 } else {
2854 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2855 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2856 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2857 }
2858
2b7bcebf 2859 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2860 return status;
2861}
2862
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns 0, or the last non-zero programming status (assignment is
 * attempted for every VF even if an earlier one fails).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program a pmac filter; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2897
4c876616
SP
2898static int be_vfs_mac_query(struct be_adapter *adapter)
2899{
2900 int status, vf;
2901 u8 mac[ETH_ALEN];
2902 struct be_vf_cfg *vf_cfg;
4c876616
SP
2903
2904 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
2905 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2906 mac, vf_cfg->if_handle,
2907 false, vf+1);
4c876616
SP
2908 if (status)
2909 return status;
2910 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2911 }
2912 return 0;
2913}
2914
/* Tear down SR-IOV state: disable SR-IOV, remove each VF's MAC and
 * interface, and free the per-VF config array. If any VF is still
 * assigned to a VM, only the host-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* Cannot disable SR-IOV while a guest owns a VF; skip straight
	 * to freeing host-side state.
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 delete the pmac filter; newer chips clear via
		 * SET_MAC with a NULL address.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2942
7707133c
SP
/* Destroy all data-path queues in dependency order: MCC first, then
 * RX completion queues, TX queues, and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2950
/* Synchronously cancel the periodic worker, but only if it was actually
 * scheduled (tracked via BE_FLAGS_WORKER_SCHEDULED), so teardown paths
 * can call this unconditionally.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
2958
b05004ad 2959static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
2960{
2961 int i;
2962
b05004ad
SK
2963 if (adapter->pmac_id) {
2964 for (i = 0; i < (adapter->uc_macs + 1); i++)
2965 be_cmd_pmac_del(adapter, adapter->if_handle,
2966 adapter->pmac_id[i], 0);
2967 adapter->uc_macs = 0;
2968
2969 kfree(adapter->pmac_id);
2970 adapter->pmac_id = NULL;
2971 }
2972}
2973
/* Full teardown counterpart of be_setup(): stop the worker, clear VFs,
 * MAC filters, the primary interface, all queues, and MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2991
/* Create a FW interface for every VF. On non-BE3 chips the capability
 * flags are taken from the VF's FW profile when available; otherwise a
 * minimal untagged/broadcast/multicast set is used.
 * Returns 0, or the first interface-creation error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* profile query failure is non-fatal: fall back to the
		 * default cap_flags above
		 */
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3021
39f1d94d 3022static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3023{
11ac75ed 3024 struct be_vf_cfg *vf_cfg;
30128031
SP
3025 int vf;
3026
39f1d94d
SP
3027 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3028 GFP_KERNEL);
3029 if (!adapter->vf_cfg)
3030 return -ENOMEM;
3031
11ac75ed
SP
3032 for_all_vfs(adapter, vf_cfg, vf) {
3033 vf_cfg->if_handle = -1;
3034 vf_cfg->pmac_id = -1;
30128031 3035 }
39f1d94d 3036 return 0;
30128031
SP
3037}
3038
f9449ab7
SP
/* Bring up SR-IOV: decide the VF count (honoring VFs already enabled by
 * a previous driver instance), create or rediscover per-VF interfaces
 * and MACs, grant filtering privileges, lift the BE3 TX-rate cap, and
 * finally enable SR-IOV on the PCI level for the fresh-enable case.
 * On failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs survive from a previous load; reuse them and ignore
		 * the module parameter if it disagrees.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Pre-existing VFs: look up their if_ids; otherwise create them */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise: query existing MACs vs. program fresh ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3130
f93f160b
VV
3131/* Converting function_mode bits on BE3 to SH mc_type enums */
3132
3133static u8 be_convert_mc_type(u32 function_mode)
3134{
3135 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3136 return vNIC1;
3137 else if (function_mode & FLEX10_MODE)
3138 return FLEX10;
3139 else if (function_mode & VNIC_MODE)
3140 return vNIC2;
3141 else if (function_mode & UMC_ENABLED)
3142 return UMC;
3143 else
3144 return MC_NONE;
3145}
3146
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits; derive them here
 * from chip type, PF/VF role, multi-channel mode and SR-IOV intent and
 * fill in @res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for a native-mode-capable PF without SR-IOV;
	 * otherwise max_rss_qs keeps its incoming value (0 from the
	 * caller's zeroed struct unless the FW profile set it).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3212
30128031
SP
/* Reset adapter soft-state to pre-setup defaults before (re)configuring
 * the device; command privileges default by PF/VF role until queried
 * from the FW.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
3225
/* Populate adapter->res with this function's resource limits: computed
 * locally for BE2/BE3, queried from the FW for Lancer/Skyhawk.
 * Returns 0 or a FW query error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the max_vfs value is taken from the profile; the
		 * rest of adapter->res keeps the GET_FUNC_CONFIG limits.
		 */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3269
39f1d94d
SP
/* Routine to query per function resource limits.
 * Also reads the basic FW config (port number, function mode/caps, ASIC
 * rev), reports the active FW profile on a PF, allocates the pmac_id
 * table sized from the uc-mac limit, and clamps cfg_num_qs.
 * Returns 0, a FW error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* profile query failure is non-fatal — informational only */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3305
95046b92
SP
/* Establish the netdev's primary MAC: read the permanent MAC from FW on
 * first setup (dev_addr still zero), or re-program the existing dev_addr
 * after a reset. Returns 0 or the FW query error.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3329
68d7bdcb
SP
/* Kick off the periodic (1s) worker and record that it is scheduled so
 * be_cancel_worker() knows to cancel it on teardown.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3335
/* Create all data-path queues in dependency order (EQs, TX, RX CQs, MCC)
 * and publish the real queue counts to the net stack. Caller must hold
 * rtnl_lock for the netif_set_real_num_*_queues() calls.
 * Returns 0, or the first creation error (partial state is left for the
 * caller to unwind).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3370
68d7bdcb
SP
/* Re-create the data-path queues (e.g. after a channel-count change):
 * close the device if running, stop the worker, drop and re-acquire
 * MSI-X (unless vectors are shared with RoCE), rebuild the queues, and
 * reopen. Returns 0 or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3406
7707133c
SP
/* One-shot device bring-up: query config/resources, enable MSI-X, create
 * the primary FW interface and all queues, program the MAC, restore
 * VLAN/RX-mode/flow-control settings, optionally enable SR-IOV, and
 * start the periodic worker. Any failure unwinds via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags the interface actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Old BE2 firmware (< 4.0) has known interrupt problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-assert our desired flow-control settings if the FW's current
	 * values differ.
	 */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failure is deliberately non-fatal to PF bring-up */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3487
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupt delivery unavailable, manually
 * notify each event queue and schedule its NAPI context so pending
 * completions (e.g. for netconsole) get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}

	return;
}
#endif
3503
84517482 3504#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3505static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3506
/* Decide whether the redboot (boot code) section needs reflashing by
 * comparing the CRC stored in flash against the CRC found in the last
 * 4 bytes of the image in the UFI file at @img_start/@image_size.
 * Returns true only when the CRCs differ (i.e. flashing is required);
 * returns false on CRC-read failure as a safe default.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC occupies the final 4 bytes of the image within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3533
306f1348
SP
3534static bool phy_flashing_required(struct be_adapter *adapter)
3535{
42f11cf2
AK
3536 return (adapter->phy.phy_type == TN_8022 &&
3537 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3538}
3539
c165541e
PR
3540static bool is_comp_in_ufi(struct be_adapter *adapter,
3541 struct flash_section_info *fsec, int type)
3542{
3543 int i = 0, img_type = 0;
3544 struct flash_section_info_g2 *fsec_g2 = NULL;
3545
ca34fe38 3546 if (BE2_chip(adapter))
c165541e
PR
3547 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3548
3549 for (i = 0; i < MAX_FLASH_COMP; i++) {
3550 if (fsec_g2)
3551 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3552 else
3553 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3554
3555 if (img_type == type)
3556 return true;
3557 }
3558 return false;
3559
3560}
3561
4188e7df 3562static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3563 int header_size,
3564 const struct firmware *fw)
3565{
3566 struct flash_section_info *fsec = NULL;
3567 const u8 *p = fw->data;
3568
3569 p += header_size;
3570 while (p < (fw->data + fw->size)) {
3571 fsec = (struct flash_section_info *)p;
3572 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3573 return fsec;
3574 p += 32;
3575 }
3576 return NULL;
3577}
3578
773a2d7c
PR
/* Write one image of @img_size bytes to flash in 32KB chunks through the
 * FW write-flashrom command. Intermediate chunks use the SAVE op; the
 * final chunk uses the FLASH op to commit (PHY variants for PHY FW).
 * Returns 0 on success or the FW error; an ILLEGAL_IOCTL_REQ during PHY
 * FW flashing is treated as "not supported" and ends the loop quietly.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3619
/* For BE2, BE3 and BE3-R: walk the fixed per-generation component tables
 * below and flash every component present in the UFI, honoring the
 * special rules (NCSI needs FW >= 3.102.148.0, PHY FW only when the
 * attached PHY requires it, redboot only on CRC mismatch).
 * Returns 0, -1 on a corrupt/truncated UFI, or a flash-write error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Gen3 (BE3) flash layout: offset, op-type, max size, UFI type */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Gen2 (BE2) flash layout — no NCSI or PHY FW regions */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing requires controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the image against the end of the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3729
773a2d7c
PR
3730static int be_flash_skyhawk(struct be_adapter *adapter,
3731 const struct firmware *fw,
3732 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3733{
773a2d7c
PR
3734 int status = 0, i, filehdr_size = 0;
3735 int img_offset, img_size, img_optype, redboot;
3736 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3737 const u8 *p = fw->data;
3738 struct flash_section_info *fsec = NULL;
3739
3740 filehdr_size = sizeof(struct flash_file_hdr_g3);
3741 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3742 if (!fsec) {
3743 dev_err(&adapter->pdev->dev,
3744 "Invalid Cookie. UFI corrupted ?\n");
3745 return -1;
3746 }
3747
3748 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3749 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3750 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3751
3752 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3753 case IMAGE_FIRMWARE_iSCSI:
3754 img_optype = OPTYPE_ISCSI_ACTIVE;
3755 break;
3756 case IMAGE_BOOT_CODE:
3757 img_optype = OPTYPE_REDBOOT;
3758 break;
3759 case IMAGE_OPTION_ROM_ISCSI:
3760 img_optype = OPTYPE_BIOS;
3761 break;
3762 case IMAGE_OPTION_ROM_PXE:
3763 img_optype = OPTYPE_PXE_BIOS;
3764 break;
3765 case IMAGE_OPTION_ROM_FCoE:
3766 img_optype = OPTYPE_FCOE_BIOS;
3767 break;
3768 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3769 img_optype = OPTYPE_ISCSI_BACKUP;
3770 break;
3771 case IMAGE_NCSI:
3772 img_optype = OPTYPE_NCSI_FW;
3773 break;
3774 default:
3775 continue;
3776 }
3777
3778 if (img_optype == OPTYPE_REDBOOT) {
3779 redboot = be_flash_redboot(adapter, fw->data,
3780 img_offset, img_size,
3781 filehdr_size + img_hdrs_size);
3782 if (!redboot)
3783 continue;
3784 }
3785
3786 p = fw->data;
3787 p += filehdr_size + img_offset + img_hdrs_size;
3788 if (p + img_size > fw->data + fw->size)
3789 return -1;
3790
3791 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3792 if (status) {
3793 dev_err(&adapter->pdev->dev,
3794 "Flashing section type %d failed.\n",
3795 fsec->fsec_entry[i].type);
3796 return status;
3797 }
3798 }
3799 return 0;
3f0d4560
AK
3800}
3801
485bf569
SN
3802static int lancer_fw_download(struct be_adapter *adapter,
3803 const struct firmware *fw)
84517482 3804{
485bf569
SN
3805#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3806#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3807 struct be_dma_mem flash_cmd;
485bf569
SN
3808 const u8 *data_ptr = NULL;
3809 u8 *dest_image_ptr = NULL;
3810 size_t image_size = 0;
3811 u32 chunk_size = 0;
3812 u32 data_written = 0;
3813 u32 offset = 0;
3814 int status = 0;
3815 u8 add_status = 0;
f67ef7ba 3816 u8 change_status;
84517482 3817
485bf569 3818 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3819 dev_err(&adapter->pdev->dev,
485bf569
SN
3820 "FW Image not properly aligned. "
3821 "Length must be 4 byte aligned.\n");
3822 status = -EINVAL;
3823 goto lancer_fw_exit;
d9efd2af
SB
3824 }
3825
485bf569
SN
3826 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3827 + LANCER_FW_DOWNLOAD_CHUNK;
3828 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3829 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3830 if (!flash_cmd.va) {
3831 status = -ENOMEM;
485bf569
SN
3832 goto lancer_fw_exit;
3833 }
84517482 3834
485bf569
SN
3835 dest_image_ptr = flash_cmd.va +
3836 sizeof(struct lancer_cmd_req_write_object);
3837 image_size = fw->size;
3838 data_ptr = fw->data;
3839
3840 while (image_size) {
3841 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3842
3843 /* Copy the image chunk content. */
3844 memcpy(dest_image_ptr, data_ptr, chunk_size);
3845
3846 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3847 chunk_size, offset,
3848 LANCER_FW_DOWNLOAD_LOCATION,
3849 &data_written, &change_status,
3850 &add_status);
485bf569
SN
3851 if (status)
3852 break;
3853
3854 offset += data_written;
3855 data_ptr += data_written;
3856 image_size -= data_written;
3857 }
3858
3859 if (!status) {
3860 /* Commit the FW written */
3861 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3862 0, offset,
3863 LANCER_FW_DOWNLOAD_LOCATION,
3864 &data_written, &change_status,
3865 &add_status);
485bf569
SN
3866 }
3867
3868 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3869 flash_cmd.dma);
3870 if (status) {
3871 dev_err(&adapter->pdev->dev,
3872 "Firmware load error. "
3873 "Status code: 0x%x Additional Status: 0x%x\n",
3874 status, add_status);
3875 goto lancer_fw_exit;
3876 }
3877
f67ef7ba 3878 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3879 dev_info(&adapter->pdev->dev,
3880 "Resetting adapter to activate new FW\n");
5c510811
SK
3881 status = lancer_physdev_ctrl(adapter,
3882 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3883 if (status) {
3884 dev_err(&adapter->pdev->dev,
3885 "Adapter busy for FW reset.\n"
3886 "New FW will not be active.\n");
3887 goto lancer_fw_exit;
3888 }
3889 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3890 dev_err(&adapter->pdev->dev,
3891 "System reboot required for new FW"
3892 " to be active\n");
3893 }
3894
485bf569
SN
3895 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3896lancer_fw_exit:
3897 return status;
3898}
3899
ca34fe38
SP
3900#define UFI_TYPE2 2
3901#define UFI_TYPE3 3
0ad3157e 3902#define UFI_TYPE3R 10
ca34fe38
SP
3903#define UFI_TYPE4 4
3904static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3905 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3906{
3907 if (fhdr == NULL)
3908 goto be_get_ufi_exit;
3909
ca34fe38
SP
3910 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3911 return UFI_TYPE4;
0ad3157e
VV
3912 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3913 if (fhdr->asic_type_rev == 0x10)
3914 return UFI_TYPE3R;
3915 else
3916 return UFI_TYPE3;
3917 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3918 return UFI_TYPE2;
773a2d7c
PR
3919
3920be_get_ufi_exit:
3921 dev_err(&adapter->pdev->dev,
3922 "UFI and Interface are not compatible for flashing\n");
3923 return -1;
3924}
3925
485bf569
SN
3926static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3927{
485bf569
SN
3928 struct flash_file_hdr_g3 *fhdr3;
3929 struct image_hdr *img_hdr_ptr = NULL;
3930 struct be_dma_mem flash_cmd;
3931 const u8 *p;
773a2d7c 3932 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3933
be716446 3934 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3935 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3936 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3937 if (!flash_cmd.va) {
3938 status = -ENOMEM;
485bf569 3939 goto be_fw_exit;
84517482
AK
3940 }
3941
773a2d7c 3942 p = fw->data;
0ad3157e 3943 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3944
0ad3157e 3945 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3946
773a2d7c
PR
3947 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3948 for (i = 0; i < num_imgs; i++) {
3949 img_hdr_ptr = (struct image_hdr *)(fw->data +
3950 (sizeof(struct flash_file_hdr_g3) +
3951 i * sizeof(struct image_hdr)));
3952 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3953 switch (ufi_type) {
3954 case UFI_TYPE4:
773a2d7c
PR
3955 status = be_flash_skyhawk(adapter, fw,
3956 &flash_cmd, num_imgs);
0ad3157e
VV
3957 break;
3958 case UFI_TYPE3R:
ca34fe38
SP
3959 status = be_flash_BEx(adapter, fw, &flash_cmd,
3960 num_imgs);
0ad3157e
VV
3961 break;
3962 case UFI_TYPE3:
3963 /* Do not flash this ufi on BE3-R cards */
3964 if (adapter->asic_rev < 0x10)
3965 status = be_flash_BEx(adapter, fw,
3966 &flash_cmd,
3967 num_imgs);
3968 else {
3969 status = -1;
3970 dev_err(&adapter->pdev->dev,
3971 "Can't load BE3 UFI on BE3R\n");
3972 }
3973 }
3f0d4560 3974 }
773a2d7c
PR
3975 }
3976
ca34fe38
SP
3977 if (ufi_type == UFI_TYPE2)
3978 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3979 else if (ufi_type == -1)
3f0d4560 3980 status = -1;
84517482 3981
2b7bcebf
IV
3982 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3983 flash_cmd.dma);
84517482
AK
3984 if (status) {
3985 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3986 goto be_fw_exit;
84517482
AK
3987 }
3988
af901ca1 3989 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3990
485bf569
SN
3991be_fw_exit:
3992 return status;
3993}
3994
3995int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3996{
3997 const struct firmware *fw;
3998 int status;
3999
4000 if (!netif_running(adapter->netdev)) {
4001 dev_err(&adapter->pdev->dev,
4002 "Firmware load not allowed (interface is down)\n");
4003 return -1;
4004 }
4005
4006 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4007 if (status)
4008 goto fw_exit;
4009
4010 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4011
4012 if (lancer_chip(adapter))
4013 status = lancer_fw_download(adapter, fw);
4014 else
4015 status = be_fw_download(adapter, fw);
4016
eeb65ced
SK
4017 if (!status)
4018 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4019 adapter->fw_on_flash);
4020
84517482
AK
4021fw_exit:
4022 release_firmware(fw);
4023 return status;
4024}
4025
a77dcb8c
AK
4026static int be_ndo_bridge_setlink(struct net_device *dev,
4027 struct nlmsghdr *nlh)
4028{
4029 struct be_adapter *adapter = netdev_priv(dev);
4030 struct nlattr *attr, *br_spec;
4031 int rem;
4032 int status = 0;
4033 u16 mode = 0;
4034
4035 if (!sriov_enabled(adapter))
4036 return -EOPNOTSUPP;
4037
4038 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4039
4040 nla_for_each_nested(attr, br_spec, rem) {
4041 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4042 continue;
4043
4044 mode = nla_get_u16(attr);
4045 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4046 return -EINVAL;
4047
4048 status = be_cmd_set_hsw_config(adapter, 0, 0,
4049 adapter->if_handle,
4050 mode == BRIDGE_MODE_VEPA ?
4051 PORT_FWD_TYPE_VEPA :
4052 PORT_FWD_TYPE_VEB);
4053 if (status)
4054 goto err;
4055
4056 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4057 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4058
4059 return status;
4060 }
4061err:
4062 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4063 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4064
4065 return status;
4066}
4067
4068static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4069 struct net_device *dev,
4070 u32 filter_mask)
4071{
4072 struct be_adapter *adapter = netdev_priv(dev);
4073 int status = 0;
4074 u8 hsw_mode;
4075
4076 if (!sriov_enabled(adapter))
4077 return 0;
4078
4079 /* BE and Lancer chips support VEB mode only */
4080 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4081 hsw_mode = PORT_FWD_TYPE_VEB;
4082 } else {
4083 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4084 adapter->if_handle, &hsw_mode);
4085 if (status)
4086 return 0;
4087 }
4088
4089 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4090 hsw_mode == PORT_FWD_TYPE_VEPA ?
4091 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4092}
4093
e5686ad8 4094static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4095 .ndo_open = be_open,
4096 .ndo_stop = be_close,
4097 .ndo_start_xmit = be_xmit,
a54769f5 4098 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4099 .ndo_set_mac_address = be_mac_addr_set,
4100 .ndo_change_mtu = be_change_mtu,
ab1594e9 4101 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4102 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4103 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4104 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4105 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4106 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4107 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
4108 .ndo_get_vf_config = be_get_vf_config,
4109#ifdef CONFIG_NET_POLL_CONTROLLER
4110 .ndo_poll_controller = be_netpoll,
4111#endif
a77dcb8c
AK
4112 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4113 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0
SP
4114#ifdef CONFIG_NET_RX_BUSY_POLL
4115 .ndo_busy_poll = be_busy_poll
4116#endif
6b7c5b94
SP
4117};
4118
4119static void be_netdev_init(struct net_device *netdev)
4120{
4121 struct be_adapter *adapter = netdev_priv(netdev);
4122
6332c8d3 4123 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4124 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4125 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4126 if (be_multi_rxq(adapter))
4127 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4128
4129 netdev->features |= netdev->hw_features |
f646968f 4130 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4131
eb8a50d9 4132 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4133 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4134
fbc13f01
AK
4135 netdev->priv_flags |= IFF_UNICAST_FLT;
4136
6b7c5b94
SP
4137 netdev->flags |= IFF_MULTICAST;
4138
b7e5887e 4139 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4140
10ef9ab4 4141 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4142
4143 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4144}
4145
4146static void be_unmap_pci_bars(struct be_adapter *adapter)
4147{
c5b3ad4c
SP
4148 if (adapter->csr)
4149 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4150 if (adapter->db)
ce66f781 4151 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4152}
4153
ce66f781
SP
/* PCI BAR number holding the doorbell registers: BAR 0 on Lancer and
 * on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4161
4162static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4163{
dbf0f2a7 4164 if (skyhawk_chip(adapter)) {
ce66f781
SP
4165 adapter->roce_db.size = 4096;
4166 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4167 db_bar(adapter));
4168 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4169 db_bar(adapter));
4170 }
045508a8 4171 return 0;
6b7c5b94
SP
4172}
4173
4174static int be_map_pci_bars(struct be_adapter *adapter)
4175{
4176 u8 __iomem *addr;
fe6d2a38 4177
c5b3ad4c
SP
4178 if (BEx_chip(adapter) && be_physfn(adapter)) {
4179 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4180 if (adapter->csr == NULL)
4181 return -ENOMEM;
4182 }
4183
ce66f781 4184 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4185 if (addr == NULL)
4186 goto pci_map_err;
ba343c77 4187 adapter->db = addr;
ce66f781
SP
4188
4189 be_roce_map_pci_bars(adapter);
6b7c5b94 4190 return 0;
ce66f781 4191
6b7c5b94
SP
4192pci_map_err:
4193 be_unmap_pci_bars(adapter);
4194 return -ENOMEM;
4195}
4196
6b7c5b94
SP
4197static void be_ctrl_cleanup(struct be_adapter *adapter)
4198{
8788fdc2 4199 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4200
4201 be_unmap_pci_bars(adapter);
4202
4203 if (mem->va)
2b7bcebf
IV
4204 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4205 mem->dma);
e7b909a6 4206
5b8821b7 4207 mem = &adapter->rx_filter;
e7b909a6 4208 if (mem->va)
2b7bcebf
IV
4209 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4210 mem->dma);
6b7c5b94
SP
4211}
4212
6b7c5b94
SP
4213static int be_ctrl_init(struct be_adapter *adapter)
4214{
8788fdc2
SP
4215 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4216 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4217 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4218 u32 sli_intf;
6b7c5b94 4219 int status;
6b7c5b94 4220
ce66f781
SP
4221 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4222 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4223 SLI_INTF_FAMILY_SHIFT;
4224 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4225
6b7c5b94
SP
4226 status = be_map_pci_bars(adapter);
4227 if (status)
e7b909a6 4228 goto done;
6b7c5b94
SP
4229
4230 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4231 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4232 mbox_mem_alloc->size,
4233 &mbox_mem_alloc->dma,
4234 GFP_KERNEL);
6b7c5b94 4235 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4236 status = -ENOMEM;
4237 goto unmap_pci_bars;
6b7c5b94
SP
4238 }
4239 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4240 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4241 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4242 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4243
5b8821b7 4244 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4245 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4246 rx_filter->size, &rx_filter->dma,
4247 GFP_KERNEL);
5b8821b7 4248 if (rx_filter->va == NULL) {
e7b909a6
SP
4249 status = -ENOMEM;
4250 goto free_mbox;
4251 }
1f9061d2 4252
2984961c 4253 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4254 spin_lock_init(&adapter->mcc_lock);
4255 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4256
5eeff635 4257 init_completion(&adapter->et_cmd_compl);
cf588477 4258 pci_save_state(adapter->pdev);
6b7c5b94 4259 return 0;
e7b909a6
SP
4260
4261free_mbox:
2b7bcebf
IV
4262 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4263 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4264
4265unmap_pci_bars:
4266 be_unmap_pci_bars(adapter);
4267
4268done:
4269 return status;
6b7c5b94
SP
4270}
4271
4272static void be_stats_cleanup(struct be_adapter *adapter)
4273{
3abcdeda 4274 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4275
4276 if (cmd->va)
2b7bcebf
IV
4277 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4278 cmd->va, cmd->dma);
6b7c5b94
SP
4279}
4280
4281static int be_stats_init(struct be_adapter *adapter)
4282{
3abcdeda 4283 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4284
ca34fe38
SP
4285 if (lancer_chip(adapter))
4286 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4287 else if (BE2_chip(adapter))
89a88ab8 4288 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4289 else if (BE3_chip(adapter))
ca34fe38 4290 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4291 else
4292 /* ALL non-BE ASICs */
4293 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4294
ede23fa8
JP
4295 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4296 GFP_KERNEL);
6b7c5b94
SP
4297 if (cmd->va == NULL)
4298 return -1;
4299 return 0;
4300}
4301
3bc6b06c 4302static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4303{
4304 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4305
6b7c5b94
SP
4306 if (!adapter)
4307 return;
4308
045508a8 4309 be_roce_dev_remove(adapter);
8cef7a78 4310 be_intr_set(adapter, false);
045508a8 4311
f67ef7ba
PR
4312 cancel_delayed_work_sync(&adapter->func_recovery_work);
4313
6b7c5b94
SP
4314 unregister_netdev(adapter->netdev);
4315
5fb379ee
SP
4316 be_clear(adapter);
4317
bf99e50d
PR
4318 /* tell fw we're done with firing cmds */
4319 be_cmd_fw_clean(adapter);
4320
6b7c5b94
SP
4321 be_stats_cleanup(adapter);
4322
4323 be_ctrl_cleanup(adapter);
4324
d6b6d987
SP
4325 pci_disable_pcie_error_reporting(pdev);
4326
6b7c5b94
SP
4327 pci_release_regions(pdev);
4328 pci_disable_device(pdev);
4329
4330 free_netdev(adapter->netdev);
4331}
4332
39f1d94d 4333static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4334{
baaa08d1 4335 int status, level;
6b7c5b94 4336
9e1453c5
AK
4337 status = be_cmd_get_cntl_attributes(adapter);
4338 if (status)
4339 return status;
4340
7aeb2156
PR
4341 /* Must be a power of 2 or else MODULO will BUG_ON */
4342 adapter->be_get_temp_freq = 64;
4343
baaa08d1
VV
4344 if (BEx_chip(adapter)) {
4345 level = be_cmd_get_fw_log_level(adapter);
4346 adapter->msg_enable =
4347 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4348 }
941a77d5 4349
92bf14ab 4350 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4351 return 0;
6b7c5b94
SP
4352}
4353
f67ef7ba 4354static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4355{
01e5b2c4 4356 struct device *dev = &adapter->pdev->dev;
d8110f62 4357 int status;
d8110f62 4358
f67ef7ba
PR
4359 status = lancer_test_and_set_rdy_state(adapter);
4360 if (status)
4361 goto err;
d8110f62 4362
f67ef7ba
PR
4363 if (netif_running(adapter->netdev))
4364 be_close(adapter->netdev);
d8110f62 4365
f67ef7ba
PR
4366 be_clear(adapter);
4367
01e5b2c4 4368 be_clear_all_error(adapter);
f67ef7ba
PR
4369
4370 status = be_setup(adapter);
4371 if (status)
4372 goto err;
d8110f62 4373
f67ef7ba
PR
4374 if (netif_running(adapter->netdev)) {
4375 status = be_open(adapter->netdev);
d8110f62
PR
4376 if (status)
4377 goto err;
f67ef7ba 4378 }
d8110f62 4379
4bebb56a 4380 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4381 return 0;
4382err:
01e5b2c4
SK
4383 if (status == -EAGAIN)
4384 dev_err(dev, "Waiting for resource provisioning\n");
4385 else
4bebb56a 4386 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4387
f67ef7ba
PR
4388 return status;
4389}
4390
4391static void be_func_recovery_task(struct work_struct *work)
4392{
4393 struct be_adapter *adapter =
4394 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4395 int status = 0;
d8110f62 4396
f67ef7ba 4397 be_detect_error(adapter);
d8110f62 4398
f67ef7ba 4399 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4400
f67ef7ba
PR
4401 rtnl_lock();
4402 netif_device_detach(adapter->netdev);
4403 rtnl_unlock();
d8110f62 4404
f67ef7ba 4405 status = lancer_recover_func(adapter);
f67ef7ba
PR
4406 if (!status)
4407 netif_device_attach(adapter->netdev);
d8110f62 4408 }
f67ef7ba 4409
01e5b2c4
SK
4410 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4411 * no need to attempt further recovery.
4412 */
4413 if (!status || status == -EAGAIN)
4414 schedule_delayed_work(&adapter->func_recovery_work,
4415 msecs_to_jiffies(1000));
d8110f62
PR
4416}
4417
4418static void be_worker(struct work_struct *work)
4419{
4420 struct be_adapter *adapter =
4421 container_of(work, struct be_adapter, work.work);
4422 struct be_rx_obj *rxo;
4423 int i;
4424
d8110f62
PR
4425 /* when interrupts are not yet enabled, just reap any pending
4426 * mcc completions */
4427 if (!netif_running(adapter->netdev)) {
072a9c48 4428 local_bh_disable();
10ef9ab4 4429 be_process_mcc(adapter);
072a9c48 4430 local_bh_enable();
d8110f62
PR
4431 goto reschedule;
4432 }
4433
4434 if (!adapter->stats_cmd_sent) {
4435 if (lancer_chip(adapter))
4436 lancer_cmd_get_pport_stats(adapter,
4437 &adapter->stats_cmd);
4438 else
4439 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4440 }
4441
d696b5e2
VV
4442 if (be_physfn(adapter) &&
4443 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4444 be_cmd_get_die_temperature(adapter);
4445
d8110f62 4446 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4447 /* Replenish RX-queues starved due to memory
4448 * allocation failures.
4449 */
4450 if (rxo->rx_post_starved)
d8110f62 4451 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4452 }
4453
2632bafd 4454 be_eqd_update(adapter);
10ef9ab4 4455
d8110f62
PR
4456reschedule:
4457 adapter->work_counter++;
4458 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4459}
4460
257a3feb 4461/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4462static bool be_reset_required(struct be_adapter *adapter)
4463{
257a3feb 4464 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4465}
4466
d379142b
SP
4467static char *mc_name(struct be_adapter *adapter)
4468{
f93f160b
VV
4469 char *str = ""; /* default */
4470
4471 switch (adapter->mc_type) {
4472 case UMC:
4473 str = "UMC";
4474 break;
4475 case FLEX10:
4476 str = "FLEX10";
4477 break;
4478 case vNIC1:
4479 str = "vNIC-1";
4480 break;
4481 case nPAR:
4482 str = "nPAR";
4483 break;
4484 case UFP:
4485 str = "UFP";
4486 break;
4487 case vNIC2:
4488 str = "vNIC-2";
4489 break;
4490 default:
4491 str = "";
4492 }
4493
4494 return str;
d379142b
SP
4495}
4496
/* "PF" or "VF", for the probe banner */
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4501
1dd06ae8 4502static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4503{
4504 int status = 0;
4505 struct be_adapter *adapter;
4506 struct net_device *netdev;
b4e32a71 4507 char port_name;
6b7c5b94
SP
4508
4509 status = pci_enable_device(pdev);
4510 if (status)
4511 goto do_none;
4512
4513 status = pci_request_regions(pdev, DRV_NAME);
4514 if (status)
4515 goto disable_dev;
4516 pci_set_master(pdev);
4517
7f640062 4518 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4519 if (netdev == NULL) {
4520 status = -ENOMEM;
4521 goto rel_reg;
4522 }
4523 adapter = netdev_priv(netdev);
4524 adapter->pdev = pdev;
4525 pci_set_drvdata(pdev, adapter);
4526 adapter->netdev = netdev;
2243e2e9 4527 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4528
4c15c243 4529 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4530 if (!status) {
4531 netdev->features |= NETIF_F_HIGHDMA;
4532 } else {
4c15c243 4533 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4534 if (status) {
4535 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4536 goto free_netdev;
4537 }
4538 }
4539
ea58c180
AK
4540 if (be_physfn(adapter)) {
4541 status = pci_enable_pcie_error_reporting(pdev);
4542 if (!status)
4543 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4544 }
d6b6d987 4545
6b7c5b94
SP
4546 status = be_ctrl_init(adapter);
4547 if (status)
39f1d94d 4548 goto free_netdev;
6b7c5b94 4549
2243e2e9 4550 /* sync up with fw's ready state */
ba343c77 4551 if (be_physfn(adapter)) {
bf99e50d 4552 status = be_fw_wait_ready(adapter);
ba343c77
SB
4553 if (status)
4554 goto ctrl_clean;
ba343c77 4555 }
6b7c5b94 4556
39f1d94d
SP
4557 if (be_reset_required(adapter)) {
4558 status = be_cmd_reset_function(adapter);
4559 if (status)
4560 goto ctrl_clean;
556ae191 4561
2d177be8
KA
4562 /* Wait for interrupts to quiesce after an FLR */
4563 msleep(100);
4564 }
8cef7a78
SK
4565
4566 /* Allow interrupts for other ULPs running on NIC function */
4567 be_intr_set(adapter, true);
10ef9ab4 4568
2d177be8
KA
4569 /* tell fw we're ready to fire cmds */
4570 status = be_cmd_fw_init(adapter);
4571 if (status)
4572 goto ctrl_clean;
4573
2243e2e9
SP
4574 status = be_stats_init(adapter);
4575 if (status)
4576 goto ctrl_clean;
4577
39f1d94d 4578 status = be_get_initial_config(adapter);
6b7c5b94
SP
4579 if (status)
4580 goto stats_clean;
6b7c5b94
SP
4581
4582 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4583 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4584 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4585
5fb379ee
SP
4586 status = be_setup(adapter);
4587 if (status)
55f5c3c5 4588 goto stats_clean;
2243e2e9 4589
3abcdeda 4590 be_netdev_init(netdev);
6b7c5b94
SP
4591 status = register_netdev(netdev);
4592 if (status != 0)
5fb379ee 4593 goto unsetup;
6b7c5b94 4594
045508a8
PP
4595 be_roce_dev_add(adapter);
4596
f67ef7ba
PR
4597 schedule_delayed_work(&adapter->func_recovery_work,
4598 msecs_to_jiffies(1000));
b4e32a71
PR
4599
4600 be_cmd_query_port_name(adapter, &port_name);
4601
d379142b
SP
4602 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4603 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4604
6b7c5b94
SP
4605 return 0;
4606
5fb379ee
SP
4607unsetup:
4608 be_clear(adapter);
6b7c5b94
SP
4609stats_clean:
4610 be_stats_cleanup(adapter);
4611ctrl_clean:
4612 be_ctrl_cleanup(adapter);
f9449ab7 4613free_netdev:
fe6d2a38 4614 free_netdev(netdev);
6b7c5b94
SP
4615rel_reg:
4616 pci_release_regions(pdev);
4617disable_dev:
4618 pci_disable_device(pdev);
4619do_none:
c4ca2374 4620 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4621 return status;
4622}
4623
4624static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4625{
4626 struct be_adapter *adapter = pci_get_drvdata(pdev);
4627 struct net_device *netdev = adapter->netdev;
4628
76a9e08e 4629 if (adapter->wol_en)
71d8d1b5
AK
4630 be_setup_wol(adapter, true);
4631
d4360d6f 4632 be_intr_set(adapter, false);
f67ef7ba
PR
4633 cancel_delayed_work_sync(&adapter->func_recovery_work);
4634
6b7c5b94
SP
4635 netif_device_detach(netdev);
4636 if (netif_running(netdev)) {
4637 rtnl_lock();
4638 be_close(netdev);
4639 rtnl_unlock();
4640 }
9b0365f1 4641 be_clear(adapter);
6b7c5b94
SP
4642
4643 pci_save_state(pdev);
4644 pci_disable_device(pdev);
4645 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4646 return 0;
4647}
4648
4649static int be_resume(struct pci_dev *pdev)
4650{
4651 int status = 0;
4652 struct be_adapter *adapter = pci_get_drvdata(pdev);
4653 struct net_device *netdev = adapter->netdev;
4654
4655 netif_device_detach(netdev);
4656
4657 status = pci_enable_device(pdev);
4658 if (status)
4659 return status;
4660
1ca01512 4661 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4662 pci_restore_state(pdev);
4663
dd5746bf
SB
4664 status = be_fw_wait_ready(adapter);
4665 if (status)
4666 return status;
4667
d4360d6f 4668 be_intr_set(adapter, true);
2243e2e9
SP
4669 /* tell fw we're ready to fire cmds */
4670 status = be_cmd_fw_init(adapter);
4671 if (status)
4672 return status;
4673
9b0365f1 4674 be_setup(adapter);
6b7c5b94
SP
4675 if (netif_running(netdev)) {
4676 rtnl_lock();
4677 be_open(netdev);
4678 rtnl_unlock();
4679 }
f67ef7ba
PR
4680
4681 schedule_delayed_work(&adapter->func_recovery_work,
4682 msecs_to_jiffies(1000));
6b7c5b94 4683 netif_device_attach(netdev);
71d8d1b5 4684
76a9e08e 4685 if (adapter->wol_en)
71d8d1b5 4686 be_setup_wol(adapter, false);
a4ca055f 4687
6b7c5b94
SP
4688 return 0;
4689}
4690
82456b03
SP
4691/*
4692 * An FLR will stop BE from DMAing any data.
4693 */
4694static void be_shutdown(struct pci_dev *pdev)
4695{
4696 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4697
2d5d4154
AK
4698 if (!adapter)
4699 return;
82456b03 4700
0f4a6828 4701 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4702 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4703
2d5d4154 4704 netif_device_detach(adapter->netdev);
82456b03 4705
57841869
AK
4706 be_cmd_reset_function(adapter);
4707
82456b03 4708 pci_disable_device(pdev);
82456b03
SP
4709}
4710
cf588477
SP
4711static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4712 pci_channel_state_t state)
4713{
4714 struct be_adapter *adapter = pci_get_drvdata(pdev);
4715 struct net_device *netdev = adapter->netdev;
4716
4717 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4718
01e5b2c4
SK
4719 if (!adapter->eeh_error) {
4720 adapter->eeh_error = true;
cf588477 4721
01e5b2c4 4722 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4723
cf588477 4724 rtnl_lock();
01e5b2c4
SK
4725 netif_device_detach(netdev);
4726 if (netif_running(netdev))
4727 be_close(netdev);
cf588477 4728 rtnl_unlock();
01e5b2c4
SK
4729
4730 be_clear(adapter);
cf588477 4731 }
cf588477
SP
4732
4733 if (state == pci_channel_io_perm_failure)
4734 return PCI_ERS_RESULT_DISCONNECT;
4735
4736 pci_disable_device(pdev);
4737
eeb7fc7b
SK
4738 /* The error could cause the FW to trigger a flash debug dump.
4739 * Resetting the card while flash dump is in progress
c8a54163
PR
4740 * can cause it not to recover; wait for it to finish.
4741 * Wait only for first function as it is needed only once per
4742 * adapter.
eeb7fc7b 4743 */
c8a54163
PR
4744 if (pdev->devfn == 0)
4745 ssleep(30);
4746
cf588477
SP
4747 return PCI_ERS_RESULT_NEED_RESET;
4748}
4749
4750static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4751{
4752 struct be_adapter *adapter = pci_get_drvdata(pdev);
4753 int status;
4754
4755 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4756
4757 status = pci_enable_device(pdev);
4758 if (status)
4759 return PCI_ERS_RESULT_DISCONNECT;
4760
4761 pci_set_master(pdev);
1ca01512 4762 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4763 pci_restore_state(pdev);
4764
4765 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4766 dev_info(&adapter->pdev->dev,
4767 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4768 status = be_fw_wait_ready(adapter);
cf588477
SP
4769 if (status)
4770 return PCI_ERS_RESULT_DISCONNECT;
4771
d6b6d987 4772 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4773 be_clear_all_error(adapter);
cf588477
SP
4774 return PCI_ERS_RESULT_RECOVERED;
4775}
4776
4777static void be_eeh_resume(struct pci_dev *pdev)
4778{
4779 int status = 0;
4780 struct be_adapter *adapter = pci_get_drvdata(pdev);
4781 struct net_device *netdev = adapter->netdev;
4782
4783 dev_info(&adapter->pdev->dev, "EEH resume\n");
4784
4785 pci_save_state(pdev);
4786
2d177be8 4787 status = be_cmd_reset_function(adapter);
cf588477
SP
4788 if (status)
4789 goto err;
4790
2d177be8
KA
4791 /* tell fw we're ready to fire cmds */
4792 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4793 if (status)
4794 goto err;
4795
cf588477
SP
4796 status = be_setup(adapter);
4797 if (status)
4798 goto err;
4799
4800 if (netif_running(netdev)) {
4801 status = be_open(netdev);
4802 if (status)
4803 goto err;
4804 }
f67ef7ba
PR
4805
4806 schedule_delayed_work(&adapter->func_recovery_work,
4807 msecs_to_jiffies(1000));
cf588477
SP
4808 netif_device_attach(netdev);
4809 return;
4810err:
4811 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4812}
4813
3646f0e5 4814static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4815 .error_detected = be_eeh_err_detected,
4816 .slot_reset = be_eeh_reset,
4817 .resume = be_eeh_resume,
4818};
4819
6b7c5b94
SP
4820static struct pci_driver be_driver = {
4821 .name = DRV_NAME,
4822 .id_table = be_dev_ids,
4823 .probe = be_probe,
4824 .remove = be_remove,
4825 .suspend = be_suspend,
cf588477 4826 .resume = be_resume,
82456b03 4827 .shutdown = be_shutdown,
cf588477 4828 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4829};
4830
4831static int __init be_init_module(void)
4832{
8e95a202
JP
4833 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4834 rx_frag_size != 2048) {
6b7c5b94
SP
4835 printk(KERN_WARNING DRV_NAME
4836 " : Module param rx_frag_size must be 2048/4096/8192."
4837 " Using 2048\n");
4838 rx_frag_size = 2048;
4839 }
6b7c5b94
SP
4840
4841 return pci_register_driver(&be_driver);
4842}
4843module_init(be_init_module);
4844
4845static void __exit be_exit_module(void)
4846{
4847 pci_unregister_driver(&be_driver);
4848}
4849module_exit(be_exit_module);