be2net: define macro for_all_tx_queues_on_eq()
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

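/* Fallback interrupt control: toggle the HOSTINTR bit in the MEMBAR
 * interrupt-control register via PCI config space; used by be_intr_set()
 * below when the FW-command based method fails.
 */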
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

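/* Doorbell helpers: the adapter is told about newly posted entries or
 * processed completions by writing the queue id and a count into the
 * queue's doorbell register. The wmb() ensures ring updates in memory
 * are visible to the device before the doorbell write.
 */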
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

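/* ndo_set_mac_address handler: the new MAC is programmed with PMAC_ADD
 * and committed to the netdev only after the FW confirms it actually
 * became the active MAC; a VF lacking privilege may be refused.
 */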
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

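/* Accumulate a 16-bit HW counter into a 32-bit SW counter: the low 16
 * bits of *acc mirror the HW value while the high 16 bits count the
 * wrap-arounds seen so far.
 */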
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

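/* Pick the stats parser matching the stats-cmd version this chip
 * supports: v0 for BE2, v1 for BE3, v2 otherwise, or the Lancer pport
 * format.
 */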
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

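/* ndo_get_stats64: fold the per-queue SW counters (sampled under the
 * u64_stats sync to get consistent 64-bit values) into the netdev stats,
 * along with the FW-reported error counters cached in drv_stats.
 */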
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

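/* Build the header WRB that precedes the data WRBs of a TX request: it
 * carries the checksum/LSO offload flags, the optional HW VLAN tag and
 * the total WRB count and byte length of the request.
 */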
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

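/* DMA-map the skb head and frags and build one data WRB per mapping.
 * On a mapping error everything mapped so far is unmapped and the queue
 * head is rolled back. Returns the bytes queued, or 0 on failure.
 */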
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}

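/* Software VLAN insertion: write the tag (and the outer QnQ tag, when
 * one is configured) into the packet data itself, for the cases where
 * HW VLAN tagging must be avoided.
 */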
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

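/* Check for the IPv6 extension-header layout that can trip the BE3 TX
 * stall erratum handled by be_ipv6_tx_stall_chk() below.
 */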
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

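/* Main transmit entry point: apply the ASIC workarounds, build the WRBs,
 * stop the subqueue if it can't hold another max-sized request, and ring
 * the TX doorbell.
 */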
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

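/* ndo_set_rx_mode: sync the HW RX filters with netdev state, i.e.
 * promiscuous mode, the unicast MAC list and the multicast list, falling
 * back to (multicast) promiscuous mode when more addresses are
 * configured than the interface can filter.
 */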
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

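/* ndo_set_vf_rate: only a max TX rate can be set; it must lie between
 * 100Mbps and the current link speed and, on Skyhawk, be a multiple of
 * 1% of the link speed since FW programs the QOS value as a percentage.
 */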
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

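/* Adaptive interrupt coalescing: derive the pkts/sec rate seen by each
 * event queue since the last sample, map it to an EQ delay and clamp the
 * result to the configured [min_eqd, max_eqd] range; EQs whose delay
 * changed are updated with a single FW command.
 */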
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

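/* Pop the page_info at the RX queue tail. Only the last fragment carved
 * out of a page holds the DMA mapping and must be unmapped; earlier
 * fragments from the same page just need a dma_sync for the CPU.
 */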
0b0ef1d0 1544static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1545{
10ef9ab4 1546 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1547 struct be_rx_page_info *rx_page_info;
3abcdeda 1548 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1549 u16 frag_idx = rxq->tail;
6b7c5b94 1550
3abcdeda 1551 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1552 BUG_ON(!rx_page_info->page);
1553
e50287be 1554 if (rx_page_info->last_frag) {
2b7bcebf
IV
1555 dma_unmap_page(&adapter->pdev->dev,
1556 dma_unmap_addr(rx_page_info, bus),
1557 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1558 rx_page_info->last_frag = false;
1559 } else {
1560 dma_sync_single_for_cpu(&adapter->pdev->dev,
1561 dma_unmap_addr(rx_page_info, bus),
1562 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1563 }
6b7c5b94 1564
0b0ef1d0 1565 queue_tail_inc(rxq);
6b7c5b94
SP
1566 atomic_dec(&rxq->used);
1567 return rx_page_info;
1568}
1569
1570/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1571static void be_rx_compl_discard(struct be_rx_obj *rxo,
1572 struct be_rx_compl_info *rxcp)
6b7c5b94 1573{
6b7c5b94 1574 struct be_rx_page_info *page_info;
2e588f84 1575 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1576
e80d9da6 1577 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1578 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1579 put_page(page_info->page);
1580 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1581 }
1582}
1583
1584/*
1585 * skb_fill_rx_data forms a complete skb for an ether frame
1586 * indicated by rxcp.
1587 */
10ef9ab4
SP
1588static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1589 struct be_rx_compl_info *rxcp)
6b7c5b94 1590{
6b7c5b94 1591 struct be_rx_page_info *page_info;
2e588f84
SP
1592 u16 i, j;
1593 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1594 u8 *start;
6b7c5b94 1595
0b0ef1d0 1596 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1597 start = page_address(page_info->page) + page_info->page_offset;
1598 prefetch(start);
1599
1600 /* Copy data in the first descriptor of this completion */
2e588f84 1601 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1602
6b7c5b94
SP
1603 skb->len = curr_frag_len;
1604 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1605 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1606 /* Complete packet has now been moved to data */
1607 put_page(page_info->page);
1608 skb->data_len = 0;
1609 skb->tail += curr_frag_len;
1610 } else {
ac1ae5f3
ED
1611 hdr_len = ETH_HLEN;
1612 memcpy(skb->data, start, hdr_len);
6b7c5b94 1613 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1614 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1615 skb_shinfo(skb)->frags[0].page_offset =
1616 page_info->page_offset + hdr_len;
748b539a
SP
1617 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1618 curr_frag_len - hdr_len);
6b7c5b94 1619 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1620 skb->truesize += rx_frag_size;
6b7c5b94
SP
1621 skb->tail += hdr_len;
1622 }
205859a2 1623 page_info->page = NULL;
6b7c5b94 1624
2e588f84
SP
1625 if (rxcp->pkt_size <= rx_frag_size) {
1626 BUG_ON(rxcp->num_rcvd != 1);
1627 return;
6b7c5b94
SP
1628 }
1629
1630 /* More frags present for this completion */
2e588f84
SP
1631 remaining = rxcp->pkt_size - curr_frag_len;
1632 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1633 page_info = get_rx_page_info(rxo);
2e588f84 1634 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1635
bd46cb6c
AK
1636 /* Coalesce all frags from the same physical page in one slot */
1637 if (page_info->page_offset == 0) {
1638 /* Fresh page */
1639 j++;
b061b39e 1640 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1641 skb_shinfo(skb)->frags[j].page_offset =
1642 page_info->page_offset;
9e903e08 1643 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1644 skb_shinfo(skb)->nr_frags++;
1645 } else {
1646 put_page(page_info->page);
1647 }
1648
9e903e08 1649 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1650 skb->len += curr_frag_len;
1651 skb->data_len += curr_frag_len;
bdb28a97 1652 skb->truesize += rx_frag_size;
2e588f84 1653 remaining -= curr_frag_len;
205859a2 1654 page_info->page = NULL;
6b7c5b94 1655 }
bd46cb6c 1656 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1657}
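
/* Worked example, assuming the default rx_frag_size of 2048 and
 * BE_HDR_LEN smaller than a full fragment: for a 3000-byte frame spread
 * over two fragments, the code above copies the 14-byte ethernet header
 * into the linear area, attaches the remaining 2034 bytes of fragment 0
 * as a page frag, and the loop then adds the trailing 952 bytes from
 * the next fragment, leaving skb->len == 3000 with at most two frag
 * slots in use.
 */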
1658
5be93b9a 1659/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1660static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1661 struct be_rx_compl_info *rxcp)
6b7c5b94 1662{
10ef9ab4 1663 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1664 struct net_device *netdev = adapter->netdev;
6b7c5b94 1665 struct sk_buff *skb;
89420424 1666
bb349bb4 1667 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1668 if (unlikely(!skb)) {
ac124ff9 1669 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1670 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1671 return;
1672 }
1673
10ef9ab4 1674 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1675
6332c8d3 1676 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1677 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1678 else
1679 skb_checksum_none_assert(skb);
6b7c5b94 1680
6332c8d3 1681 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1682 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1683 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1684 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1685
b6c0e89d 1686 skb->csum_level = rxcp->tunneled;
6384a4d0 1687 skb_mark_napi_id(skb, napi);
6b7c5b94 1688
343e43c0 1689 if (rxcp->vlanf)
86a9bad3 1690 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1691
1692 netif_receive_skb(skb);
6b7c5b94
SP
1693}
1694
5be93b9a 1695/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1696static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1697 struct napi_struct *napi,
1698 struct be_rx_compl_info *rxcp)
6b7c5b94 1699{
10ef9ab4 1700 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1701 struct be_rx_page_info *page_info;
5be93b9a 1702 struct sk_buff *skb = NULL;
2e588f84
SP
1703 u16 remaining, curr_frag_len;
1704 u16 i, j;
3968fa1e 1705
10ef9ab4 1706 skb = napi_get_frags(napi);
5be93b9a 1707 if (!skb) {
10ef9ab4 1708 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1709 return;
1710 }
1711
2e588f84
SP
1712 remaining = rxcp->pkt_size;
1713 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1714 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1715
1716 curr_frag_len = min(remaining, rx_frag_size);
1717
bd46cb6c
AK
1718 /* Coalesce all frags from the same physical page in one slot */
1719 if (i == 0 || page_info->page_offset == 0) {
1720 /* First frag or Fresh page */
1721 j++;
b061b39e 1722 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1723 skb_shinfo(skb)->frags[j].page_offset =
1724 page_info->page_offset;
9e903e08 1725 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1726 } else {
1727 put_page(page_info->page);
1728 }
9e903e08 1729 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1730 skb->truesize += rx_frag_size;
bd46cb6c 1731 remaining -= curr_frag_len;
6b7c5b94
SP
1732 memset(page_info, 0, sizeof(*page_info));
1733 }
bd46cb6c 1734 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1735
5be93b9a 1736 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1737 skb->len = rxcp->pkt_size;
1738 skb->data_len = rxcp->pkt_size;
5be93b9a 1739 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1740 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1741 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1742 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1743
b6c0e89d 1744 skb->csum_level = rxcp->tunneled;
6384a4d0 1745 skb_mark_napi_id(skb, napi);
5be93b9a 1746
343e43c0 1747 if (rxcp->vlanf)
86a9bad3 1748 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1749
10ef9ab4 1750 napi_gro_frags(napi);
2e588f84
SP
1751}
1752
10ef9ab4
SP
1753static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1754 struct be_rx_compl_info *rxcp)
2e588f84 1755{
c3c18bc1
SP
1756 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1757 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1758 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1759 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1760 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1761 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1762 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1763 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1764 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1765 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1766 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1767 if (rxcp->vlanf) {
c3c18bc1
SP
1768 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1769 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1770 }
c3c18bc1 1771 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1772 rxcp->tunneled =
c3c18bc1 1773 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1774}
1775
10ef9ab4
SP
1776static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1777 struct be_rx_compl_info *rxcp)
2e588f84 1778{
c3c18bc1
SP
1779 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1780 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1781 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1782 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1783 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1784 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1785 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1786 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1787 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1788 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1789 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1790 if (rxcp->vlanf) {
c3c18bc1
SP
1791 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1792 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1793 }
c3c18bc1
SP
1794 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1795 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1796}
1797
1798static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1799{
1800 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1801 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1802 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1803
2e588f84
SP
 1804 /* For checking the valid bit it is OK to use either definition as the
1805 * valid bit is at the same position in both v0 and v1 Rx compl */
1806 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1807 return NULL;
6b7c5b94 1808
2e588f84
SP
1809 rmb();
1810 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1811
2e588f84 1812 if (adapter->be3_native)
10ef9ab4 1813 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1814 else
10ef9ab4 1815 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1816
e38b1706
SK
1817 if (rxcp->ip_frag)
1818 rxcp->l4_csum = 0;
1819
15d72184 1820 if (rxcp->vlanf) {
f93f160b
VV
1821 /* In QNQ modes, if qnq bit is not set, then the packet was
1822 * tagged only with the transparent outer vlan-tag and must
1823 * not be treated as a vlan packet by host
1824 */
1825 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1826 rxcp->vlanf = 0;
6b7c5b94 1827
15d72184 1828 if (!lancer_chip(adapter))
3c709f8f 1829 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1830
939cf306 1831 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1832 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1833 rxcp->vlanf = 0;
1834 }
2e588f84
SP
1835
 1836 /* As the compl has been parsed, reset it; we won't touch it again */
1837 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1838
3abcdeda 1839 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1840 return rxcp;
1841}
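
/* Typical consumer loop, as in be_process_rx() and be_rx_cq_clean():
 * call be_rx_compl_get() until it returns NULL, then notify the CQ with
 * the number of completions processed:
 *
 *	while ((rxcp = be_rx_compl_get(rxo)) != NULL)
 *		... process or discard rxcp ...
 *	be_cq_notify(adapter, rxo->cq.id, true, work_done);
 */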
1842
1829b086 1843static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1844{
6b7c5b94 1845 u32 order = get_order(size);
1829b086 1846
6b7c5b94 1847 if (order > 0)
1829b086
ED
1848 gfp |= __GFP_COMP;
1849 return alloc_pages(gfp, order);
6b7c5b94
SP
1850}
1851
1852/*
 1853 * Allocate a page, split it into fragments of size rx_frag_size and post as
1854 * receive buffers to BE
1855 */
1829b086 1856static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1857{
3abcdeda 1858 struct be_adapter *adapter = rxo->adapter;
26d92f92 1859 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1860 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1861 struct page *pagep = NULL;
ba42fad0 1862 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1863 struct be_eth_rx_d *rxd;
1864 u64 page_dmaaddr = 0, frag_dmaaddr;
1865 u32 posted, page_offset = 0;
1866
3abcdeda 1867 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1868 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1869 if (!pagep) {
1829b086 1870 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1871 if (unlikely(!pagep)) {
ac124ff9 1872 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1873 break;
1874 }
ba42fad0
IV
1875 page_dmaaddr = dma_map_page(dev, pagep, 0,
1876 adapter->big_page_size,
2b7bcebf 1877 DMA_FROM_DEVICE);
ba42fad0
IV
1878 if (dma_mapping_error(dev, page_dmaaddr)) {
1879 put_page(pagep);
1880 pagep = NULL;
d3de1540 1881 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1882 break;
1883 }
e50287be 1884 page_offset = 0;
6b7c5b94
SP
1885 } else {
1886 get_page(pagep);
e50287be 1887 page_offset += rx_frag_size;
6b7c5b94 1888 }
e50287be 1889 page_info->page_offset = page_offset;
6b7c5b94 1890 page_info->page = pagep;
6b7c5b94
SP
1891
1892 rxd = queue_head_node(rxq);
e50287be 1893 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1894 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1895 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1896
1897 /* Any space left in the current big page for another frag? */
1898 if ((page_offset + rx_frag_size + rx_frag_size) >
1899 adapter->big_page_size) {
1900 pagep = NULL;
e50287be
SP
1901 page_info->last_frag = true;
1902 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1903 } else {
1904 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1905 }
26d92f92
SP
1906
1907 prev_page_info = page_info;
1908 queue_head_inc(rxq);
10ef9ab4 1909 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1910 }
e50287be
SP
1911
1912 /* Mark the last frag of a page when we break out of the above loop
1913 * with no more slots available in the RXQ
1914 */
1915 if (pagep) {
1916 prev_page_info->last_frag = true;
1917 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1918 }
6b7c5b94
SP
1919
1920 if (posted) {
6b7c5b94 1921 atomic_add(posted, &rxq->used);
6384a4d0
SP
1922 if (rxo->rx_post_starved)
1923 rxo->rx_post_starved = false;
8788fdc2 1924 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1925 } else if (atomic_read(&rxq->used) == 0) {
1926 /* Let be_worker replenish when memory is available */
3abcdeda 1927 rxo->rx_post_starved = true;
6b7c5b94 1928 }
6b7c5b94
SP
1929}
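
/* Carving arithmetic illustrated, assuming PAGE_SIZE == 4096 and
 * rx_frag_size == 2048: big_page_size is 4096, so page_offset takes the
 * values 0 and 2048 before "another frag" no longer fits and a fresh
 * page is allocated. Every posted RX descriptor thus covers exactly one
 * 2KB slice of a page shared via get_page() references.
 */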
1930
5fb379ee 1931static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1932{
6b7c5b94
SP
1933 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1934
1935 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1936 return NULL;
1937
f3eb62d2 1938 rmb();
6b7c5b94
SP
1939 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1940
1941 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1942
1943 queue_tail_inc(tx_cq);
1944 return txcp;
1945}
1946
3c8def97 1947static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1948 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1949{
3c8def97 1950 struct be_queue_info *txq = &txo->q;
a73b796e 1951 struct be_eth_wrb *wrb;
3c8def97 1952 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1953 struct sk_buff *sent_skb;
ec43b1a6
SP
1954 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1955 bool unmap_skb_hdr = true;
6b7c5b94 1956
ec43b1a6 1957 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1958 BUG_ON(!sent_skb);
ec43b1a6
SP
1959 sent_skbs[txq->tail] = NULL;
1960
1961 /* skip header wrb */
a73b796e 1962 queue_tail_inc(txq);
6b7c5b94 1963
ec43b1a6 1964 do {
6b7c5b94 1965 cur_index = txq->tail;
a73b796e 1966 wrb = queue_tail_node(txq);
2b7bcebf
IV
1967 unmap_tx_frag(&adapter->pdev->dev, wrb,
1968 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1969 unmap_skb_hdr = false;
1970
6b7c5b94
SP
1971 num_wrbs++;
1972 queue_tail_inc(txq);
ec43b1a6 1973 } while (cur_index != last_index);
6b7c5b94 1974
96d49225 1975 dev_consume_skb_any(sent_skb);
4d586b82 1976 return num_wrbs;
6b7c5b94
SP
1977}
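
/* Accounting sketch: for an skb posted with one header wrb and N data
 * wrbs, the walk above returns N + 1 (num_wrbs starts at 1 for the hdr
 * wrb), which callers subtract from txq->used to reclaim ring space.
 */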
1978
10ef9ab4
SP
1979/* Return the number of events in the event queue */
1980static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1981{
10ef9ab4
SP
1982 struct be_eq_entry *eqe;
1983 int num = 0;
859b1e4e 1984
10ef9ab4
SP
1985 do {
1986 eqe = queue_tail_node(&eqo->q);
1987 if (eqe->evt == 0)
1988 break;
859b1e4e 1989
10ef9ab4
SP
1990 rmb();
1991 eqe->evt = 0;
1992 num++;
1993 queue_tail_inc(&eqo->q);
1994 } while (true);
1995
1996 return num;
859b1e4e
SP
1997}
1998
10ef9ab4
SP
 1999/* Leaves the EQ in disarmed state */
2000static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2001{
10ef9ab4 2002 int num = events_get(eqo);
859b1e4e 2003
10ef9ab4 2004 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2005}
2006
10ef9ab4 2007static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2008{
2009 struct be_rx_page_info *page_info;
3abcdeda
SP
2010 struct be_queue_info *rxq = &rxo->q;
2011 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2012 struct be_rx_compl_info *rxcp;
d23e946c
SP
2013 struct be_adapter *adapter = rxo->adapter;
2014 int flush_wait = 0;
6b7c5b94 2015
d23e946c
SP
2016 /* Consume pending rx completions.
2017 * Wait for the flush completion (identified by zero num_rcvd)
2018 * to arrive. Notify CQ even when there are no more CQ entries
2019 * for HW to flush partially coalesced CQ entries.
2020 * In Lancer, there is no need to wait for flush compl.
2021 */
2022 for (;;) {
2023 rxcp = be_rx_compl_get(rxo);
ddf1169f 2024 if (!rxcp) {
d23e946c
SP
2025 if (lancer_chip(adapter))
2026 break;
2027
2028 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2029 dev_warn(&adapter->pdev->dev,
2030 "did not receive flush compl\n");
2031 break;
2032 }
2033 be_cq_notify(adapter, rx_cq->id, true, 0);
2034 mdelay(1);
2035 } else {
2036 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2037 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2038 if (rxcp->num_rcvd == 0)
2039 break;
2040 }
6b7c5b94
SP
2041 }
2042
d23e946c
SP
2043 /* After cleanup, leave the CQ in unarmed state */
2044 be_cq_notify(adapter, rx_cq->id, false, 0);
2045
2046 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2047 while (atomic_read(&rxq->used) > 0) {
2048 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2049 put_page(page_info->page);
2050 memset(page_info, 0, sizeof(*page_info));
2051 }
2052 BUG_ON(atomic_read(&rxq->used));
482c9e79 2053 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2054}
2055
0ae57bb3 2056static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2057{
0ae57bb3
SP
2058 struct be_tx_obj *txo;
2059 struct be_queue_info *txq;
a8e9179a 2060 struct be_eth_tx_compl *txcp;
4d586b82 2061 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2062 struct sk_buff *sent_skb;
2063 bool dummy_wrb;
0ae57bb3 2064 int i, pending_txqs;
a8e9179a 2065
1a3d0717 2066 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2067 do {
0ae57bb3
SP
2068 pending_txqs = adapter->num_tx_qs;
2069
2070 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2071 cmpl = 0;
2072 num_wrbs = 0;
0ae57bb3
SP
2073 txq = &txo->q;
2074 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2075 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2076 num_wrbs += be_tx_compl_process(adapter, txo,
2077 end_idx);
2078 cmpl++;
2079 }
2080 if (cmpl) {
2081 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2082 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2083 timeo = 0;
0ae57bb3
SP
2084 }
2085 if (atomic_read(&txq->used) == 0)
2086 pending_txqs--;
a8e9179a
SP
2087 }
2088
1a3d0717 2089 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2090 break;
2091
2092 mdelay(1);
2093 } while (true);
2094
0ae57bb3
SP
2095 for_all_tx_queues(adapter, txo, i) {
2096 txq = &txo->q;
2097 if (atomic_read(&txq->used))
2098 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2099 atomic_read(&txq->used));
2100
2101 /* free posted tx for which compls will never arrive */
2102 while (atomic_read(&txq->used)) {
2103 sent_skb = txo->sent_skb_list[txq->tail];
2104 end_idx = txq->tail;
2105 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2106 &dummy_wrb);
2107 index_adv(&end_idx, num_wrbs - 1, txq->len);
2108 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2109 atomic_sub(num_wrbs, &txq->used);
2110 }
b03388d6 2111 }
6b7c5b94
SP
2112}
2113
10ef9ab4
SP
2114static void be_evt_queues_destroy(struct be_adapter *adapter)
2115{
2116 struct be_eq_obj *eqo;
2117 int i;
2118
2119 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2120 if (eqo->q.created) {
2121 be_eq_clean(eqo);
10ef9ab4 2122 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2123 napi_hash_del(&eqo->napi);
68d7bdcb 2124 netif_napi_del(&eqo->napi);
19d59aa7 2125 }
10ef9ab4
SP
2126 be_queue_free(adapter, &eqo->q);
2127 }
2128}
2129
2130static int be_evt_queues_create(struct be_adapter *adapter)
2131{
2132 struct be_queue_info *eq;
2133 struct be_eq_obj *eqo;
2632bafd 2134 struct be_aic_obj *aic;
10ef9ab4
SP
2135 int i, rc;
2136
92bf14ab
SP
2137 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2138 adapter->cfg_num_qs);
10ef9ab4
SP
2139
2140 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2141 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2142 BE_NAPI_WEIGHT);
6384a4d0 2143 napi_hash_add(&eqo->napi);
2632bafd 2144 aic = &adapter->aic_obj[i];
10ef9ab4 2145 eqo->adapter = adapter;
10ef9ab4 2146 eqo->idx = i;
2632bafd
SP
2147 aic->max_eqd = BE_MAX_EQD;
2148 aic->enable = true;
10ef9ab4
SP
2149
2150 eq = &eqo->q;
2151 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2152 sizeof(struct be_eq_entry));
10ef9ab4
SP
2153 if (rc)
2154 return rc;
2155
f2f781a7 2156 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2157 if (rc)
2158 return rc;
2159 }
1cfafab9 2160 return 0;
10ef9ab4
SP
2161}
2162
5fb379ee
SP
2163static void be_mcc_queues_destroy(struct be_adapter *adapter)
2164{
2165 struct be_queue_info *q;
5fb379ee 2166
8788fdc2 2167 q = &adapter->mcc_obj.q;
5fb379ee 2168 if (q->created)
8788fdc2 2169 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2170 be_queue_free(adapter, q);
2171
8788fdc2 2172 q = &adapter->mcc_obj.cq;
5fb379ee 2173 if (q->created)
8788fdc2 2174 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2175 be_queue_free(adapter, q);
2176}
2177
2178/* Must be called only after TX qs are created as MCC shares TX EQ */
2179static int be_mcc_queues_create(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q, *cq;
5fb379ee 2182
8788fdc2 2183 cq = &adapter->mcc_obj.cq;
5fb379ee 2184 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2185 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2186 goto err;
2187
10ef9ab4
SP
2188 /* Use the default EQ for MCC completions */
2189 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2190 goto mcc_cq_free;
2191
8788fdc2 2192 q = &adapter->mcc_obj.q;
5fb379ee
SP
2193 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2194 goto mcc_cq_destroy;
2195
8788fdc2 2196 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2197 goto mcc_q_free;
2198
2199 return 0;
2200
2201mcc_q_free:
2202 be_queue_free(adapter, q);
2203mcc_cq_destroy:
8788fdc2 2204 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2205mcc_cq_free:
2206 be_queue_free(adapter, cq);
2207err:
2208 return -1;
2209}
2210
6b7c5b94
SP
2211static void be_tx_queues_destroy(struct be_adapter *adapter)
2212{
2213 struct be_queue_info *q;
3c8def97
SP
2214 struct be_tx_obj *txo;
2215 u8 i;
6b7c5b94 2216
3c8def97
SP
2217 for_all_tx_queues(adapter, txo, i) {
2218 q = &txo->q;
2219 if (q->created)
2220 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2221 be_queue_free(adapter, q);
6b7c5b94 2222
3c8def97
SP
2223 q = &txo->cq;
2224 if (q->created)
2225 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2226 be_queue_free(adapter, q);
2227 }
6b7c5b94
SP
2228}
2229
7707133c 2230static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2231{
10ef9ab4 2232 struct be_queue_info *cq, *eq;
3c8def97 2233 struct be_tx_obj *txo;
92bf14ab 2234 int status, i;
6b7c5b94 2235
92bf14ab 2236 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2237
10ef9ab4
SP
2238 for_all_tx_queues(adapter, txo, i) {
2239 cq = &txo->cq;
2240 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2241 sizeof(struct be_eth_tx_compl));
2242 if (status)
2243 return status;
3c8def97 2244
827da44c
JS
2245 u64_stats_init(&txo->stats.sync);
2246 u64_stats_init(&txo->stats.sync_compl);
2247
10ef9ab4
SP
2248 /* If num_evt_qs is less than num_tx_qs, then more than
 2249 * one txq shares an eq
2250 */
2251 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2252 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2253 if (status)
2254 return status;
6b7c5b94 2255
10ef9ab4
SP
2256 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2257 sizeof(struct be_eth_wrb));
2258 if (status)
2259 return status;
6b7c5b94 2260
94d73aaa 2261 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2262 if (status)
2263 return status;
3c8def97 2264 }
6b7c5b94 2265
d379142b
SP
2266 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2267 adapter->num_tx_qs);
10ef9ab4 2268 return 0;
6b7c5b94
SP
2269}
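
/* Note on the eq selection above: num_tx_qs is capped at num_evt_qs by
 * the min() at the top, so the i % num_evt_qs mapping is one-to-one
 * today; the modulo simply keeps the pairing correct should the cap
 * ever allow more TXQs than EQs (e.g. 8 TXQs on 4 EQs would bind to
 * EQs 0,1,2,3,0,1,2,3).
 */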
2270
10ef9ab4 2271static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2272{
2273 struct be_queue_info *q;
3abcdeda
SP
2274 struct be_rx_obj *rxo;
2275 int i;
2276
2277 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2278 q = &rxo->cq;
2279 if (q->created)
2280 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2281 be_queue_free(adapter, q);
ac6a0c4a
SP
2282 }
2283}
2284
10ef9ab4 2285static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2286{
10ef9ab4 2287 struct be_queue_info *eq, *cq;
3abcdeda
SP
2288 struct be_rx_obj *rxo;
2289 int rc, i;
6b7c5b94 2290
92bf14ab
SP
2291 /* We can create as many RSS rings as there are EQs. */
2292 adapter->num_rx_qs = adapter->num_evt_qs;
2293
 2294 /* We'll use RSS only if at least 2 RSS rings are supported.
2295 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2296 */
92bf14ab
SP
2297 if (adapter->num_rx_qs > 1)
2298 adapter->num_rx_qs++;
2299
6b7c5b94 2300 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2301 for_all_rx_queues(adapter, rxo, i) {
2302 rxo->adapter = adapter;
3abcdeda
SP
2303 cq = &rxo->cq;
2304 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2305 sizeof(struct be_eth_rx_compl));
3abcdeda 2306 if (rc)
10ef9ab4 2307 return rc;
3abcdeda 2308
827da44c 2309 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2310 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2311 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2312 if (rc)
10ef9ab4 2313 return rc;
3abcdeda 2314 }
6b7c5b94 2315
d379142b
SP
2316 dev_info(&adapter->pdev->dev,
2317 "created %d RSS queue(s) and 1 default RX queue\n",
2318 adapter->num_rx_qs - 1);
10ef9ab4 2319 return 0;
b628bde2
SP
2320}
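
/* Sizing example: with 4 EQs, num_rx_qs starts at 4 and, being > 1,
 * gains one default (non-RSS) RXQ, giving 4 RSS rings + 1 default = 5
 * RX completion queues, each bound to EQ i % 4.
 */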
2321
6b7c5b94
SP
2322static irqreturn_t be_intx(int irq, void *dev)
2323{
e49cc34f
SP
2324 struct be_eq_obj *eqo = dev;
2325 struct be_adapter *adapter = eqo->adapter;
2326 int num_evts = 0;
6b7c5b94 2327
d0b9cec3
SP
2328 /* IRQ is not expected when NAPI is scheduled as the EQ
2329 * will not be armed.
2330 * But, this can happen on Lancer INTx where it takes
 2331 * a while to de-assert INTx or in BE2 where occasionally
2332 * an interrupt may be raised even when EQ is unarmed.
2333 * If NAPI is already scheduled, then counting & notifying
2334 * events will orphan them.
e49cc34f 2335 */
d0b9cec3 2336 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2337 num_evts = events_get(eqo);
d0b9cec3
SP
2338 __napi_schedule(&eqo->napi);
2339 if (num_evts)
2340 eqo->spurious_intr = 0;
2341 }
2342 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2343
d0b9cec3
SP
 2344 /* Return IRQ_HANDLED only for the first spurious intr
2345 * after a valid intr to stop the kernel from branding
2346 * this irq as a bad one!
e49cc34f 2347 */
d0b9cec3
SP
2348 if (num_evts || eqo->spurious_intr++ == 0)
2349 return IRQ_HANDLED;
2350 else
2351 return IRQ_NONE;
6b7c5b94
SP
2352}
2353
10ef9ab4 2354static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2355{
10ef9ab4 2356 struct be_eq_obj *eqo = dev;
6b7c5b94 2357
0b545a62
SP
2358 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2359 napi_schedule(&eqo->napi);
6b7c5b94
SP
2360 return IRQ_HANDLED;
2361}
2362
2e588f84 2363static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2364{
e38b1706 2365 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2366}
2367
10ef9ab4 2368static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2369 int budget, int polling)
6b7c5b94 2370{
3abcdeda
SP
2371 struct be_adapter *adapter = rxo->adapter;
2372 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2373 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2374 u32 work_done;
2375
2376 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2377 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2378 if (!rxcp)
2379 break;
2380
12004ae9
SP
 2381 /* Is it a flush compl that has no data? */
2382 if (unlikely(rxcp->num_rcvd == 0))
2383 goto loop_continue;
2384
2385 /* Discard compl with partial DMA Lancer B0 */
2386 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2387 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2388 goto loop_continue;
2389 }
2390
2391 /* On BE drop pkts that arrive due to imperfect filtering in
 2392 * promiscuous mode on some SKUs
2393 */
2394 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2395 !lancer_chip(adapter))) {
10ef9ab4 2396 be_rx_compl_discard(rxo, rxcp);
12004ae9 2397 goto loop_continue;
64642811 2398 }
009dd872 2399
6384a4d0
SP
2400 /* Don't do gro when we're busy_polling */
2401 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2402 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2403 else
6384a4d0
SP
2404 be_rx_compl_process(rxo, napi, rxcp);
2405
12004ae9 2406loop_continue:
2e588f84 2407 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2408 }
2409
10ef9ab4
SP
2410 if (work_done) {
2411 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2412
6384a4d0
SP
2413 /* When an rx-obj gets into post_starved state, just
2414 * let be_worker do the posting.
2415 */
2416 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2417 !rxo->rx_post_starved)
10ef9ab4 2418 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2419 }
10ef9ab4 2420
6b7c5b94
SP
2421 return work_done;
2422}
2423
512bb8a2
KA
2424static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2425{
2426 switch (status) {
2427 case BE_TX_COMP_HDR_PARSE_ERR:
2428 tx_stats(txo)->tx_hdr_parse_err++;
2429 break;
2430 case BE_TX_COMP_NDMA_ERR:
2431 tx_stats(txo)->tx_dma_err++;
2432 break;
2433 case BE_TX_COMP_ACL_ERR:
2434 tx_stats(txo)->tx_spoof_check_err++;
2435 break;
2436 }
2437}
2438
2439static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2440{
2441 switch (status) {
2442 case LANCER_TX_COMP_LSO_ERR:
2443 tx_stats(txo)->tx_tso_err++;
2444 break;
2445 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2446 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2447 tx_stats(txo)->tx_spoof_check_err++;
2448 break;
2449 case LANCER_TX_COMP_QINQ_ERR:
2450 tx_stats(txo)->tx_qinq_err++;
2451 break;
2452 case LANCER_TX_COMP_PARITY_ERR:
2453 tx_stats(txo)->tx_internal_parity_err++;
2454 break;
2455 case LANCER_TX_COMP_DMA_ERR:
2456 tx_stats(txo)->tx_dma_err++;
2457 break;
2458 }
2459}
2460
c8f64615
SP
2461static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2462 int idx)
6b7c5b94 2463{
6b7c5b94 2464 struct be_eth_tx_compl *txcp;
c8f64615 2465 int num_wrbs = 0, work_done = 0;
512bb8a2 2466 u32 compl_status;
c8f64615
SP
2467 u16 last_idx;
2468
2469 while ((txcp = be_tx_compl_get(&txo->cq))) {
2470 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2471 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2472 work_done++;
3c8def97 2473
512bb8a2
KA
2474 compl_status = GET_TX_COMPL_BITS(status, txcp);
2475 if (compl_status) {
2476 if (lancer_chip(adapter))
2477 lancer_update_tx_err(txo, compl_status);
2478 else
2479 be_update_tx_err(txo, compl_status);
2480 }
10ef9ab4 2481 }
6b7c5b94 2482
10ef9ab4
SP
2483 if (work_done) {
2484 be_cq_notify(adapter, txo->cq.id, true, work_done);
2485 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2486
10ef9ab4
SP
2487 /* As Tx wrbs have been freed up, wake up netdev queue
2488 * if it was stopped due to lack of tx wrbs. */
2489 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2490 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2491 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2492 }
10ef9ab4
SP
2493
2494 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2495 tx_stats(txo)->tx_compl += work_done;
2496 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2497 }
10ef9ab4 2498}
6b7c5b94 2499
68d7bdcb 2500int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2501{
2502 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2503 struct be_adapter *adapter = eqo->adapter;
0b545a62 2504 int max_work = 0, work, i, num_evts;
6384a4d0 2505 struct be_rx_obj *rxo;
a4906ea0 2506 struct be_tx_obj *txo;
f31e50a8 2507
0b545a62
SP
2508 num_evts = events_get(eqo);
2509
a4906ea0
SP
2510 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2511 be_process_tx(adapter, txo, i);
f31e50a8 2512
6384a4d0
SP
2513 if (be_lock_napi(eqo)) {
2514 /* This loop will iterate twice for EQ0 in which
2515 * completions of the last RXQ (default one) are also processed
2516 * For other EQs the loop iterates only once
2517 */
2518 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2519 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2520 max_work = max(work, max_work);
2521 }
2522 be_unlock_napi(eqo);
2523 } else {
2524 max_work = budget;
10ef9ab4 2525 }
6b7c5b94 2526
10ef9ab4
SP
2527 if (is_mcc_eqo(eqo))
2528 be_process_mcc(adapter);
93c86700 2529
10ef9ab4
SP
2530 if (max_work < budget) {
2531 napi_complete(napi);
0b545a62 2532 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2533 } else {
2534 /* As we'll continue in polling mode, count and clear events */
0b545a62 2535 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2536 }
10ef9ab4 2537 return max_work;
6b7c5b94
SP
2538}
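
/* NAPI contract sketch: returning a value < budget (after
 * napi_complete() and re-arming the EQ) hands interrupt-driven
 * operation back to the HW, while returning the full budget keeps the
 * core polling; in that case events are counted and cleared but the EQ
 * stays unarmed so no interrupt races the next poll.
 */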
2539
6384a4d0
SP
2540#ifdef CONFIG_NET_RX_BUSY_POLL
2541static int be_busy_poll(struct napi_struct *napi)
2542{
2543 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2544 struct be_adapter *adapter = eqo->adapter;
2545 struct be_rx_obj *rxo;
2546 int i, work = 0;
2547
2548 if (!be_lock_busy_poll(eqo))
2549 return LL_FLUSH_BUSY;
2550
2551 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2552 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2553 if (work)
2554 break;
2555 }
2556
2557 be_unlock_busy_poll(eqo);
2558 return work;
2559}
2560#endif
2561
f67ef7ba 2562void be_detect_error(struct be_adapter *adapter)
7c185276 2563{
e1cfb67a
PR
2564 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2565 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2566 u32 i;
eb0eecc1
SK
2567 bool error_detected = false;
2568 struct device *dev = &adapter->pdev->dev;
2569 struct net_device *netdev = adapter->netdev;
7c185276 2570
d23e946c 2571 if (be_hw_error(adapter))
72f02485
SP
2572 return;
2573
e1cfb67a
PR
2574 if (lancer_chip(adapter)) {
2575 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2576 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2577 sliport_err1 = ioread32(adapter->db +
748b539a 2578 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2579 sliport_err2 = ioread32(adapter->db +
748b539a 2580 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2581 adapter->hw_error = true;
2582 /* Do not log error messages if its a FW reset */
2583 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2584 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2585 dev_info(dev, "Firmware update in progress\n");
2586 } else {
2587 error_detected = true;
2588 dev_err(dev, "Error detected in the card\n");
2589 dev_err(dev, "ERR: sliport status 0x%x\n",
2590 sliport_status);
2591 dev_err(dev, "ERR: sliport error1 0x%x\n",
2592 sliport_err1);
2593 dev_err(dev, "ERR: sliport error2 0x%x\n",
2594 sliport_err2);
2595 }
e1cfb67a
PR
2596 }
2597 } else {
2598 pci_read_config_dword(adapter->pdev,
748b539a 2599 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2600 pci_read_config_dword(adapter->pdev,
748b539a 2601 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2602 pci_read_config_dword(adapter->pdev,
748b539a 2603 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2604 pci_read_config_dword(adapter->pdev,
748b539a 2605 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2606
f67ef7ba
PR
2607 ue_lo = (ue_lo & ~ue_lo_mask);
2608 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2609
eb0eecc1
SK
2610 /* On certain platforms BE hardware can indicate spurious UEs.
2611 * Allow HW to stop working completely in case of a real UE.
2612 * Hence not setting the hw_error for UE detection.
2613 */
f67ef7ba 2614
eb0eecc1
SK
2615 if (ue_lo || ue_hi) {
2616 error_detected = true;
2617 dev_err(dev,
2618 "Unrecoverable Error detected in the adapter");
2619 dev_err(dev, "Please reboot server to recover");
2620 if (skyhawk_chip(adapter))
2621 adapter->hw_error = true;
2622 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2623 if (ue_lo & 1)
2624 dev_err(dev, "UE: %s bit set\n",
2625 ue_status_low_desc[i]);
2626 }
2627 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2628 if (ue_hi & 1)
2629 dev_err(dev, "UE: %s bit set\n",
2630 ue_status_hi_desc[i]);
2631 }
7c185276
AK
2632 }
2633 }
eb0eecc1
SK
2634 if (error_detected)
2635 netif_carrier_off(netdev);
7c185276
AK
2636}
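
/* Decode sketch for the UE loops above: after masking, bit i of ue_lo
 * indexes ue_status_low_desc[] and bit i of ue_hi indexes
 * ue_status_hi_desc[]; e.g. a masked ue_lo of 0x5 would log the
 * descriptions at indices 0 and 2.
 */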
2637
8d56ff11
SP
2638static void be_msix_disable(struct be_adapter *adapter)
2639{
ac6a0c4a 2640 if (msix_enabled(adapter)) {
8d56ff11 2641 pci_disable_msix(adapter->pdev);
ac6a0c4a 2642 adapter->num_msix_vec = 0;
68d7bdcb 2643 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2644 }
2645}
2646
c2bba3df 2647static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2648{
7dc4c064 2649 int i, num_vec;
d379142b 2650 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2651
92bf14ab
SP
2652 /* If RoCE is supported, program the max number of NIC vectors that
2653 * may be configured via set-channels, along with vectors needed for
 2654 * RoCE. Else, just program the number we'll use initially.
2655 */
2656 if (be_roce_supported(adapter))
2657 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2658 2 * num_online_cpus());
2659 else
2660 num_vec = adapter->cfg_num_qs;
3abcdeda 2661
ac6a0c4a 2662 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2663 adapter->msix_entries[i].entry = i;
2664
7dc4c064
AG
2665 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2666 MIN_MSIX_VECTORS, num_vec);
2667 if (num_vec < 0)
2668 goto fail;
92bf14ab 2669
92bf14ab
SP
2670 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2671 adapter->num_msix_roce_vec = num_vec / 2;
2672 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2673 adapter->num_msix_roce_vec);
2674 }
2675
2676 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2677
2678 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2679 adapter->num_msix_vec);
c2bba3df 2680 return 0;
7dc4c064
AG
2681
2682fail:
2683 dev_warn(dev, "MSIx enable failed\n");
2684
2685 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2686 if (!be_physfn(adapter))
2687 return num_vec;
2688 return 0;
6b7c5b94
SP
2689}
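
/* Worked example, assuming RoCE support, be_max_eqs() == 6 and 8 online
 * CPUs: num_vec = min(2 * 6, 2 * 8) = 12 vectors are requested; if all
 * 12 are granted, RoCE gets 12 / 2 = 6 and the NIC keeps the remaining
 * 6. Without RoCE, only cfg_num_qs vectors are requested up front.
 */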
2690
fe6d2a38 2691static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2692 struct be_eq_obj *eqo)
b628bde2 2693{
f2f781a7 2694 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2695}
6b7c5b94 2696
b628bde2
SP
2697static int be_msix_register(struct be_adapter *adapter)
2698{
10ef9ab4
SP
2699 struct net_device *netdev = adapter->netdev;
2700 struct be_eq_obj *eqo;
2701 int status, i, vec;
6b7c5b94 2702
10ef9ab4
SP
2703 for_all_evt_queues(adapter, eqo, i) {
2704 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2705 vec = be_msix_vec_get(adapter, eqo);
2706 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2707 if (status)
2708 goto err_msix;
2709 }
b628bde2 2710
6b7c5b94 2711 return 0;
3abcdeda 2712err_msix:
10ef9ab4
SP
2713 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2714 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2715 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2716 status);
ac6a0c4a 2717 be_msix_disable(adapter);
6b7c5b94
SP
2718 return status;
2719}
2720
2721static int be_irq_register(struct be_adapter *adapter)
2722{
2723 struct net_device *netdev = adapter->netdev;
2724 int status;
2725
ac6a0c4a 2726 if (msix_enabled(adapter)) {
6b7c5b94
SP
2727 status = be_msix_register(adapter);
2728 if (status == 0)
2729 goto done;
ba343c77
SB
2730 /* INTx is not supported for VF */
2731 if (!be_physfn(adapter))
2732 return status;
6b7c5b94
SP
2733 }
2734
e49cc34f 2735 /* INTx: only the first EQ is used */
6b7c5b94
SP
2736 netdev->irq = adapter->pdev->irq;
2737 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2738 &adapter->eq_obj[0]);
6b7c5b94
SP
2739 if (status) {
2740 dev_err(&adapter->pdev->dev,
2741 "INTx request IRQ failed - err %d\n", status);
2742 return status;
2743 }
2744done:
2745 adapter->isr_registered = true;
2746 return 0;
2747}
2748
2749static void be_irq_unregister(struct be_adapter *adapter)
2750{
2751 struct net_device *netdev = adapter->netdev;
10ef9ab4 2752 struct be_eq_obj *eqo;
3abcdeda 2753 int i;
6b7c5b94
SP
2754
2755 if (!adapter->isr_registered)
2756 return;
2757
2758 /* INTx */
ac6a0c4a 2759 if (!msix_enabled(adapter)) {
e49cc34f 2760 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2761 goto done;
2762 }
2763
2764 /* MSIx */
10ef9ab4
SP
2765 for_all_evt_queues(adapter, eqo, i)
2766 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2767
6b7c5b94
SP
2768done:
2769 adapter->isr_registered = false;
6b7c5b94
SP
2770}
2771
10ef9ab4 2772static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2773{
2774 struct be_queue_info *q;
2775 struct be_rx_obj *rxo;
2776 int i;
2777
2778 for_all_rx_queues(adapter, rxo, i) {
2779 q = &rxo->q;
2780 if (q->created) {
2781 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2782 be_rx_cq_clean(rxo);
482c9e79 2783 }
10ef9ab4 2784 be_queue_free(adapter, q);
482c9e79
SP
2785 }
2786}
2787
889cd4b2
SP
2788static int be_close(struct net_device *netdev)
2789{
2790 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2791 struct be_eq_obj *eqo;
2792 int i;
889cd4b2 2793
e1ad8e33
KA
2794 /* This protection is needed as be_close() may be called even when the
2795 * adapter is in cleared state (after eeh perm failure)
2796 */
2797 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2798 return 0;
2799
045508a8
PP
2800 be_roce_dev_close(adapter);
2801
dff345c5
IV
2802 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2803 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2804 napi_disable(&eqo->napi);
6384a4d0
SP
2805 be_disable_busy_poll(eqo);
2806 }
71237b6f 2807 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2808 }
a323d9bf
SP
2809
2810 be_async_mcc_disable(adapter);
2811
2812 /* Wait for all pending tx completions to arrive so that
2813 * all tx skbs are freed.
2814 */
fba87559 2815 netif_tx_disable(netdev);
6e1f9975 2816 be_tx_compl_clean(adapter);
a323d9bf
SP
2817
2818 be_rx_qs_destroy(adapter);
2819
d11a347d
AK
2820 for (i = 1; i < (adapter->uc_macs + 1); i++)
2821 be_cmd_pmac_del(adapter, adapter->if_handle,
2822 adapter->pmac_id[i], 0);
2823 adapter->uc_macs = 0;
2824
a323d9bf 2825 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2826 if (msix_enabled(adapter))
2827 synchronize_irq(be_msix_vec_get(adapter, eqo));
2828 else
2829 synchronize_irq(netdev->irq);
2830 be_eq_clean(eqo);
63fcb27f
PR
2831 }
2832
889cd4b2
SP
2833 be_irq_unregister(adapter);
2834
482c9e79
SP
2835 return 0;
2836}
2837
10ef9ab4 2838static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2839{
2840 struct be_rx_obj *rxo;
e9008ee9 2841 int rc, i, j;
e2557877
VD
2842 u8 rss_hkey[RSS_HASH_KEY_LEN];
2843 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2844
2845 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2846 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2847 sizeof(struct be_eth_rx_d));
2848 if (rc)
2849 return rc;
2850 }
2851
2852 /* The FW would like the default RXQ to be created first */
2853 rxo = default_rxo(adapter);
2854 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2855 adapter->if_handle, false, &rxo->rss_id);
2856 if (rc)
2857 return rc;
2858
2859 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2860 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2861 rx_frag_size, adapter->if_handle,
2862 true, &rxo->rss_id);
482c9e79
SP
2863 if (rc)
2864 return rc;
2865 }
2866
2867 if (be_multi_rxq(adapter)) {
e2557877
VD
2868 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2869 j += adapter->num_rx_qs - 1) {
e9008ee9 2870 for_all_rss_queues(adapter, rxo, i) {
e2557877 2871 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2872 break;
e2557877
VD
2873 rss->rsstable[j + i] = rxo->rss_id;
2874 rss->rss_queue[j + i] = i;
e9008ee9
PR
2875 }
2876 }
e2557877
VD
2877 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2878 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2879
2880 if (!BEx_chip(adapter))
e2557877
VD
2881 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2882 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2883 } else {
2884 /* Disable RSS, if only default RX Q is created */
e2557877 2885 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2886 }
594ad54a 2887
e2557877 2888 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2889 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2890 128, rss_hkey);
da1388d6 2891 if (rc) {
e2557877 2892 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2893 return rc;
482c9e79
SP
2894 }
2895
e2557877
VD
2896 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2897
482c9e79 2898 /* First time posting */
10ef9ab4 2899 for_all_rx_queues(adapter, rxo, i)
482c9e79 2900 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2901 return 0;
2902}
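
/* Indirection-table fill illustrated for 3 RSS rings (num_rx_qs == 4):
 * the nested loops above step j by num_rx_qs - 1 = 3, writing
 * rsstable[0..2] = rss_id of ring 0..2, rsstable[3..5] = the same trio
 * again, and so on until all RSS_INDIR_TABLE_LEN entries cycle through
 * the three ring ids.
 */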
2903
6b7c5b94
SP
2904static int be_open(struct net_device *netdev)
2905{
2906 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2907 struct be_eq_obj *eqo;
3abcdeda 2908 struct be_rx_obj *rxo;
10ef9ab4 2909 struct be_tx_obj *txo;
b236916a 2910 u8 link_status;
3abcdeda 2911 int status, i;
5fb379ee 2912
10ef9ab4 2913 status = be_rx_qs_create(adapter);
482c9e79
SP
2914 if (status)
2915 goto err;
2916
c2bba3df
SK
2917 status = be_irq_register(adapter);
2918 if (status)
2919 goto err;
5fb379ee 2920
10ef9ab4 2921 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2922 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2923
10ef9ab4
SP
2924 for_all_tx_queues(adapter, txo, i)
2925 be_cq_notify(adapter, txo->cq.id, true, 0);
2926
7a1e9b20
SP
2927 be_async_mcc_enable(adapter);
2928
10ef9ab4
SP
2929 for_all_evt_queues(adapter, eqo, i) {
2930 napi_enable(&eqo->napi);
6384a4d0 2931 be_enable_busy_poll(eqo);
4cad9f3b 2932 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2933 }
04d3d624 2934 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2935
323ff71e 2936 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2937 if (!status)
2938 be_link_status_update(adapter, link_status);
2939
fba87559 2940 netif_tx_start_all_queues(netdev);
045508a8 2941 be_roce_dev_open(adapter);
c9c47142 2942
c5abe7c0 2943#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2944 if (skyhawk_chip(adapter))
2945 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2946#endif
2947
889cd4b2
SP
2948 return 0;
2949err:
2950 be_close(adapter->netdev);
2951 return -EIO;
5fb379ee
SP
2952}
2953
71d8d1b5
AK
2954static int be_setup_wol(struct be_adapter *adapter, bool enable)
2955{
2956 struct be_dma_mem cmd;
2957 int status = 0;
2958 u8 mac[ETH_ALEN];
2959
2960 memset(mac, 0, ETH_ALEN);
2961
2962 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2963 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2964 GFP_KERNEL);
ddf1169f 2965 if (!cmd.va)
6b568689 2966 return -ENOMEM;
71d8d1b5
AK
2967
2968 if (enable) {
2969 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2970 PCICFG_PM_CONTROL_OFFSET,
2971 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2972 if (status) {
2973 dev_err(&adapter->pdev->dev,
2381a55c 2974 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2975 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2976 cmd.dma);
71d8d1b5
AK
2977 return status;
2978 }
2979 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2980 adapter->netdev->dev_addr,
2981 &cmd);
71d8d1b5
AK
2982 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2983 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2984 } else {
2985 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2986 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2987 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2988 }
2989
2b7bcebf 2990 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2991 return status;
2992}
2993
6d87f5c3
AK
2994/*
2995 * Generate a seed MAC address from the PF MAC Address using jhash.
 2996 * MAC addresses for VFs are assigned incrementally starting from the seed.
2997 * These addresses are programmed in the ASIC by the PF and the VF driver
2998 * queries for the MAC address during its probe.
2999 */
4c876616 3000static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3001{
f9449ab7 3002 u32 vf;
3abcdeda 3003 int status = 0;
6d87f5c3 3004 u8 mac[ETH_ALEN];
11ac75ed 3005 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3006
3007 be_vf_eth_addr_generate(adapter, mac);
3008
11ac75ed 3009 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3010 if (BEx_chip(adapter))
590c391d 3011 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3012 vf_cfg->if_handle,
3013 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3014 else
3015 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3016 vf + 1);
590c391d 3017
6d87f5c3
AK
3018 if (status)
3019 dev_err(&adapter->pdev->dev,
748b539a
SP
3020 "Mac address assignment failed for VF %d\n",
3021 vf);
6d87f5c3 3022 else
11ac75ed 3023 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3024
3025 mac[5] += 1;
3026 }
3027 return status;
3028}
3029
4c876616
SP
3030static int be_vfs_mac_query(struct be_adapter *adapter)
3031{
3032 int status, vf;
3033 u8 mac[ETH_ALEN];
3034 struct be_vf_cfg *vf_cfg;
4c876616
SP
3035
3036 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3037 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3038 mac, vf_cfg->if_handle,
3039 false, vf+1);
4c876616
SP
3040 if (status)
3041 return status;
3042 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3043 }
3044 return 0;
3045}
3046
f9449ab7 3047static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3048{
11ac75ed 3049 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3050 u32 vf;
3051
257a3feb 3052 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3053 dev_warn(&adapter->pdev->dev,
3054 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3055 goto done;
3056 }
3057
b4c1df93
SP
3058 pci_disable_sriov(adapter->pdev);
3059
11ac75ed 3060 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3061 if (BEx_chip(adapter))
11ac75ed
SP
3062 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3063 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3064 else
3065 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3066 vf + 1);
f9449ab7 3067
11ac75ed
SP
3068 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3069 }
39f1d94d
SP
3070done:
3071 kfree(adapter->vf_cfg);
3072 adapter->num_vfs = 0;
f174c7ec 3073 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3074}
3075
7707133c
SP
3076static void be_clear_queues(struct be_adapter *adapter)
3077{
3078 be_mcc_queues_destroy(adapter);
3079 be_rx_cqs_destroy(adapter);
3080 be_tx_queues_destroy(adapter);
3081 be_evt_queues_destroy(adapter);
3082}
3083
68d7bdcb 3084static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3085{
191eb756
SP
3086 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3087 cancel_delayed_work_sync(&adapter->work);
3088 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3089 }
68d7bdcb
SP
3090}
3091
b05004ad 3092static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3093{
3094 int i;
3095
b05004ad
SK
3096 if (adapter->pmac_id) {
3097 for (i = 0; i < (adapter->uc_macs + 1); i++)
3098 be_cmd_pmac_del(adapter, adapter->if_handle,
3099 adapter->pmac_id[i], 0);
3100 adapter->uc_macs = 0;
3101
3102 kfree(adapter->pmac_id);
3103 adapter->pmac_id = NULL;
3104 }
3105}
3106
c5abe7c0 3107#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3108static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3109{
3110 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3111 be_cmd_manage_iface(adapter, adapter->if_handle,
3112 OP_CONVERT_TUNNEL_TO_NORMAL);
3113
3114 if (adapter->vxlan_port)
3115 be_cmd_set_vxlan_port(adapter, 0);
3116
3117 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3118 adapter->vxlan_port = 0;
3119}
c5abe7c0 3120#endif
c9c47142 3121
b05004ad
SK
3122static int be_clear(struct be_adapter *adapter)
3123{
68d7bdcb 3124 be_cancel_worker(adapter);
191eb756 3125
11ac75ed 3126 if (sriov_enabled(adapter))
f9449ab7
SP
3127 be_vf_clear(adapter);
3128
bec84e6b
VV
3129 /* Re-configure FW to distribute resources evenly across max-supported
3130 * number of VFs, only when VFs are not already enabled.
3131 */
3132 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3133 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3134 pci_sriov_get_totalvfs(adapter->pdev));
3135
c5abe7c0 3136#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3137 be_disable_vxlan_offloads(adapter);
c5abe7c0 3138#endif
2d17f403 3139 /* delete the primary mac along with the uc-mac list */
b05004ad 3140 be_mac_clear(adapter);
fbc13f01 3141
f9449ab7 3142 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3143
7707133c 3144 be_clear_queues(adapter);
a54769f5 3145
10ef9ab4 3146 be_msix_disable(adapter);
e1ad8e33 3147 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3148 return 0;
3149}
3150
4c876616 3151static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3152{
92bf14ab 3153 struct be_resources res = {0};
4c876616
SP
3154 struct be_vf_cfg *vf_cfg;
3155 u32 cap_flags, en_flags, vf;
922bbe88 3156 int status = 0;
abb93951 3157
4c876616
SP
3158 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3159 BE_IF_FLAGS_MULTICAST;
abb93951 3160
4c876616 3161 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3162 if (!BE3_chip(adapter)) {
3163 status = be_cmd_get_profile_config(adapter, &res,
3164 vf + 1);
3165 if (!status)
3166 cap_flags = res.if_cap_flags;
3167 }
4c876616
SP
3168
3169 /* If a FW profile exists, then cap_flags are updated */
3170 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3171 BE_IF_FLAGS_BROADCAST |
3172 BE_IF_FLAGS_MULTICAST);
3173 status =
3174 be_cmd_if_create(adapter, cap_flags, en_flags,
3175 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3176 if (status)
3177 goto err;
3178 }
3179err:
3180 return status;
abb93951
PR
3181}
3182
39f1d94d 3183static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3184{
11ac75ed 3185 struct be_vf_cfg *vf_cfg;
30128031
SP
3186 int vf;
3187
39f1d94d
SP
3188 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3189 GFP_KERNEL);
3190 if (!adapter->vf_cfg)
3191 return -ENOMEM;
3192
11ac75ed
SP
3193 for_all_vfs(adapter, vf_cfg, vf) {
3194 vf_cfg->if_handle = -1;
3195 vf_cfg->pmac_id = -1;
30128031 3196 }
39f1d94d 3197 return 0;
30128031
SP
3198}
3199
f9449ab7
SP
3200static int be_vf_setup(struct be_adapter *adapter)
3201{
c502224e 3202 struct device *dev = &adapter->pdev->dev;
11ac75ed 3203 struct be_vf_cfg *vf_cfg;
4c876616 3204 int status, old_vfs, vf;
04a06028 3205 u32 privileges;
39f1d94d 3206
257a3feb 3207 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3208
3209 status = be_vf_setup_init(adapter);
3210 if (status)
3211 goto err;
30128031 3212
4c876616
SP
3213 if (old_vfs) {
3214 for_all_vfs(adapter, vf_cfg, vf) {
3215 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3216 if (status)
3217 goto err;
3218 }
f9449ab7 3219
4c876616
SP
3220 status = be_vfs_mac_query(adapter);
3221 if (status)
3222 goto err;
3223 } else {
bec84e6b
VV
3224 status = be_vfs_if_create(adapter);
3225 if (status)
3226 goto err;
3227
39f1d94d
SP
3228 status = be_vf_eth_addr_config(adapter);
3229 if (status)
3230 goto err;
3231 }
f9449ab7 3232
11ac75ed 3233 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 3234 /* Allow VFs to program MAC/VLAN filters */
3235 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3236 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3237 status = be_cmd_set_fn_privileges(adapter,
3238 privileges |
3239 BE_PRIV_FILTMGMT,
3240 vf + 1);
3241 if (!status)
3242 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3243 vf);
3244 }
3245
0f77ba73
RN
3246 /* Allow full available bandwidth */
3247 if (!old_vfs)
3248 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3249
bdce2ad7 3250 if (!old_vfs) {
0599863d 3251 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3252 be_cmd_set_logical_link_config(adapter,
3253 IFLA_VF_LINK_STATE_AUTO,
3254 vf+1);
3255 }
f9449ab7 3256 }
b4c1df93
SP
3257
3258 if (!old_vfs) {
3259 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3260 if (status) {
3261 dev_err(dev, "SRIOV enable failed\n");
3262 adapter->num_vfs = 0;
3263 goto err;
3264 }
3265 }
f174c7ec
VV
3266
3267 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3268 return 0;
3269err:
4c876616
SP
3270 dev_err(dev, "VF setup failed\n");
3271 be_vf_clear(adapter);
f9449ab7
SP
3272 return status;
3273}
3274
f93f160b
VV
3275/* Converting function_mode bits on BE3 to SH mc_type enums */
3276
3277static u8 be_convert_mc_type(u32 function_mode)
3278{
66064dbc 3279 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3280 return vNIC1;
66064dbc 3281 else if (function_mode & QNQ_MODE)
f93f160b
VV
3282 return FLEX10;
3283 else if (function_mode & VNIC_MODE)
3284 return vNIC2;
3285 else if (function_mode & UMC_ENABLED)
3286 return UMC;
3287 else
3288 return MC_NONE;
3289}

/* On BE2/BE3, the FW does not report the supported resource limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? true : false;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
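
/* The +1 in max_rx_qs reflects the default (non-RSS) RX queue that the
 * driver creates in addition to any RSS rings.
 */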

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
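
/* Typical usage (illustrative): loading the module with
 *	modprobe be2net num_vfs=4
 * lands in the else-branch above and the request is clamped to
 * be_max_vfs(). If VFs were already enabled, e.g. by a previous driver
 * instance, pci_num_vf() is non-zero and the num_vfs param is ignored.
 */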

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are distributed equally across the maximum number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF has access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, on Lancer this is done by the FW itself.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
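
/* Teardown/rebuild order here matters: the netdev is closed and the
 * worker cancelled before any queue is destroyed, and the MSI-X table
 * is left untouched while RoCE still shares some of the vectors.
 * Callers such as an ethtool channel-count change can therefore swap
 * the whole queue set on a live interface.
 */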

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
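
/* be_setup() is the single bring-up path used by probe, resume and EEH
 * recovery: query config -> enable MSI-X -> create the interface with
 * the subset of wanted flags the HW supports -> create queues (under
 * rtnl_lock) -> program the MAC -> restore VLAN/RX-mode/flow-control
 * state -> optionally bring up SR-IOV VFs -> start the worker.
 */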

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
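
/* Each image region in the UFI carries its CRC in its last 4 bytes;
 * be_check_flash_crc() asks the FW for the CRC of what is already in
 * flash and compares it against those bytes, so unchanged regions can
 * be skipped instead of re-flashed.
 */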

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
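
/* The image is pushed to the FW in 32KB chunks. Every chunk but the
 * last uses a SAVE op (staging); the final chunk uses a FLASH op, which
 * commits the staged data. PHY FW uses its own op pair, and an
 * ILLEGAL_REQUEST status on a PHY flash is tolerated (the loop is
 * exited) rather than treated as a hard failure.
 */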

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}
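
/* In newer UFI layouts each section entry names its flash op_type
 * directly; 0xFFFF marks an older image, for which the op_type has to
 * be derived from the image type via the table above (the caller also
 * treats such an image as "old FW" and relaxes some error checks).
 */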

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
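
/* Lancer takes the whole image over the write_object command: the
 * driver streams it in 32KB chunks to the "/prg" object, then issues a
 * zero-length write at the final offset to commit it. change_status
 * then tells the driver whether a FW reset it can trigger itself is
 * enough to activate the new image, or whether a full server reboot is
 * needed.
 */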

#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
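
/* The first byte of fhdr->build encodes the chip generation the UFI
 * was built for ('2' = BE2, '3' = BE3, '4' = Skyhawk); for BE3 the
 * asic_type_rev field further separates BE3-R (0x10) images from plain
 * BE3 ones. Anything else is rejected as incompatible.
 */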

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
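
/* These two ndo hooks back the standard bridge offload interface; with
 * iproute2 the embedded switch mode would typically be flipped with
 * something like "bridge link set dev <pf> hwmode vepa" and read back
 * with "bridge link show" (exact tooling may vary). VEPA forces
 * VF-to-VF traffic out to the external switch; VEB switches it inside
 * the adapter.
 */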

#ifdef CONFIG_BE2NET_VXLAN
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		return;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
}
#endif
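
/* The VxLAN add/del callbacks are invoked by the core when a vxlan
 * device starts or stops using a UDP port. Only Skyhawk-class adapters
 * take the offload path (Lancer/BEx bail out above), and the HW can
 * offload exactly one UDP port at a time; a second port only triggers
 * the warning and stays un-offloaded.
 */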

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
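
/* BAR layout as used above: on BEx PFs the CSR space is BAR 2; the
 * doorbell space is BAR 4 on physical functions of non-Lancer chips and
 * BAR 0 everywhere else (see db_bar()). On Skyhawk the RoCE driver is
 * handed a 4K doorbell window carved from that same BAR.
 */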

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions.
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
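
/* The worker re-arms itself every second. Each pass kicks off an async
 * stats request (unless one is still in flight), reads the die
 * temperature on the PF every be_get_temp_freq passes, refills any RX
 * ring that starved on allocation failure, and adapts the EQ delay to
 * the observed load via be_eqd_update().
 */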

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
82456b03
SP
5014/*
5015 * An FLR will stop BE from DMAing any data.
5016 */
5017static void be_shutdown(struct pci_dev *pdev)
5018{
5019 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5020
2d5d4154
AK
5021 if (!adapter)
5022 return;
82456b03 5023
d114f99a 5024 be_roce_dev_shutdown(adapter);
0f4a6828 5025 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5026 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5027
2d5d4154 5028 netif_device_detach(adapter->netdev);
82456b03 5029
57841869
AK
5030 be_cmd_reset_function(adapter);
5031
82456b03 5032 pci_disable_device(pdev);
82456b03
SP
5033}
5034
cf588477 5035static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5036 pci_channel_state_t state)
cf588477
SP
5037{
5038 struct be_adapter *adapter = pci_get_drvdata(pdev);
5039 struct net_device *netdev = adapter->netdev;
5040
5041 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5042
01e5b2c4
SK
5043 if (!adapter->eeh_error) {
5044 adapter->eeh_error = true;
cf588477 5045
01e5b2c4 5046 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5047
cf588477 5048 rtnl_lock();
01e5b2c4
SK
5049 netif_device_detach(netdev);
5050 if (netif_running(netdev))
5051 be_close(netdev);
cf588477 5052 rtnl_unlock();
01e5b2c4
SK
5053
5054 be_clear(adapter);
cf588477 5055 }
cf588477
SP
5056
5057 if (state == pci_channel_io_perm_failure)
5058 return PCI_ERS_RESULT_DISCONNECT;
5059
5060 pci_disable_device(pdev);
5061
eeb7fc7b
SK
5062 /* The error could cause the FW to trigger a flash debug dump.
5063 * Resetting the card while flash dump is in progress
c8a54163
PR
5064 * can cause it not to recover; wait for it to finish.
5065 * Wait only for first function as it is needed only once per
5066 * adapter.
eeb7fc7b 5067 */
c8a54163
PR
5068 if (pdev->devfn == 0)
5069 ssleep(30);
5070
cf588477
SP
5071 return PCI_ERS_RESULT_NEED_RESET;
5072}
5073
5074static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5075{
5076 struct be_adapter *adapter = pci_get_drvdata(pdev);
5077 int status;
5078
5079 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5080
5081 status = pci_enable_device(pdev);
5082 if (status)
5083 return PCI_ERS_RESULT_DISCONNECT;
5084
5085 pci_set_master(pdev);
1ca01512 5086 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5087 pci_restore_state(pdev);
5088
5089 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5090 dev_info(&adapter->pdev->dev,
5091 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5092 status = be_fw_wait_ready(adapter);
cf588477
SP
5093 if (status)
5094 return PCI_ERS_RESULT_DISCONNECT;
5095
d6b6d987 5096 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5097 be_clear_all_error(adapter);
cf588477
SP
5098 return PCI_ERS_RESULT_RECOVERED;
5099}
5100
5101static void be_eeh_resume(struct pci_dev *pdev)
5102{
5103 int status = 0;
5104 struct be_adapter *adapter = pci_get_drvdata(pdev);
5105 struct net_device *netdev = adapter->netdev;
5106
5107 dev_info(&adapter->pdev->dev, "EEH resume\n");
5108
5109 pci_save_state(pdev);
5110
2d177be8 5111 status = be_cmd_reset_function(adapter);
cf588477
SP
5112 if (status)
5113 goto err;
5114
03a58baa
KA
5115 /* On some BE3 FW versions, after a HW reset,
5116 * interrupts will remain disabled for each function.
5117 * So, explicitly enable interrupts
5118 */
5119 be_intr_set(adapter, true);
5120
2d177be8
KA
5121 /* tell fw we're ready to fire cmds */
5122 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5123 if (status)
5124 goto err;
5125
cf588477
SP
5126 status = be_setup(adapter);
5127 if (status)
5128 goto err;
5129
5130 if (netif_running(netdev)) {
5131 status = be_open(netdev);
5132 if (status)
5133 goto err;
5134 }
f67ef7ba
PR
5135
5136 schedule_delayed_work(&adapter->func_recovery_work,
5137 msecs_to_jiffies(1000));
cf588477
SP
5138 netif_device_attach(netdev);
5139 return;
5140err:
5141 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5142}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);