sock: deduplicate errqueue dequeue
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
9baa3c34 42static const struct pci_device_id be_dev_ids[] = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
6b7c5b94 124
752961a1 125
6b7c5b94
SP
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 129 if (mem->va) {
2b7bcebf
IV
130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
1cfafab9
SP
132 mem->va = NULL;
133 }
6b7c5b94
SP
134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 137 u16 len, u16 entry_size)
6b7c5b94
SP
138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
ede23fa8
JP
145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
6b7c5b94 147 if (!mem->va)
10ef9ab4 148 return -ENOMEM;
6b7c5b94
SP
149 return 0;
150}
151
68c45a2d 152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 153{
db3ea781 154 u32 reg, enabled;
5f0b849e 155
db3ea781 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 157 &reg);
db3ea781
SP
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
5f0b849e 160 if (!enabled && enable)
6b7c5b94 161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 162 else if (enabled && !enable)
6b7c5b94 163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 164 else
6b7c5b94 165 return;
5f0b849e 166
db3ea781 167 pci_write_config_dword(adapter->pdev,
748b539a 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
169}
170
68c45a2d
SK
171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
8788fdc2 187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
192
193 wmb();
8788fdc2 194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
195}
196
94d73aaa
VV
197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
6b7c5b94
SP
199{
200 u32 val = 0;
94d73aaa 201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
203
204 wmb();
94d73aaa 205 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
206}
207
8788fdc2 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
748b539a 209 bool arm, bool clear_int, u16 num_popped)
6b7c5b94
SP
210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 214
f67ef7ba 215 if (adapter->eeh_error)
cf588477
SP
216 return;
217
6b7c5b94
SP
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
225}
226
8788fdc2 227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 233
f67ef7ba 234 if (adapter->eeh_error)
cf588477
SP
235 return;
236
6b7c5b94
SP
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
241}
242
6b7c5b94
SP
243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 246 struct device *dev = &adapter->pdev->dev;
6b7c5b94 247 struct sockaddr *addr = p;
5a712c13
SP
248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 251
ca9e4988
AK
252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
ff32f8ab
VV
255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
5a712c13
SP
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
704e4c88 266 */
5a712c13
SP
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
704e4c88
PR
278 }
279
5a712c13
SP
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
704e4c88 282 */
b188f090
SR
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
a65027e4 285 if (status)
e3a7ae2c 286 goto err;
6b7c5b94 287
5a712c13
SP
288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
61d23e9f 291 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
292 status = -EPERM;
293 goto err;
294 }
295
e3a7ae2c 296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 297 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
298 return 0;
299err:
5a712c13 300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
301 return status;
302}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 341{
ac124ff9
SP
342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 345 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 348
ac124ff9 349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
89a88ab8
AK
370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
ac124ff9 377 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 378 else
ac124ff9 379 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
ca34fe38 389static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 390{
ac124ff9
SP
391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 394 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 397
ac124ff9 398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
ac124ff9 421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
61000861
AK
435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 479 if (be_roce_supported(adapter)) {
461ae379
AK
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
61000861
AK
487}
488
005d5696
SX
489static void populate_lancer_stats(struct be_adapter *adapter)
490{
89a88ab8 491
005d5696 492 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
ac124ff9 516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 520 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 523 drvs->rx_drops_too_many_frags =
ac124ff9 524 pport_stats->rx_drops_too_many_frags_lo;
005d5696 525}
89a88ab8 526
09c1c68f
SP
527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
4188e7df 539static void populate_erx_stats(struct be_adapter *adapter,
748b539a 540 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
89a88ab8
AK
552void be_parse_stats(struct be_adapter *adapter)
553{
61000861 554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
555 struct be_rx_obj *rxo;
556 int i;
a6c578ef 557 u32 erx_stat;
ac124ff9 558
ca34fe38
SP
559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
005d5696 561 } else {
ca34fe38
SP
562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
61000861
AK
564 else if (BE3_chip(adapter))
565 /* for BE3 */
ca34fe38 566 populate_be_v1_stats(adapter);
61000861
AK
567 else
568 populate_be_v2_stats(adapter);
d51ebd33 569
61000861 570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 571 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 574 }
09c1c68f 575 }
89a88ab8
AK
576}
577
ab1594e9 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 579 struct rtnl_link_stats64 *stats)
6b7c5b94 580{
ab1594e9 581 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 582 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 583 struct be_rx_obj *rxo;
3c8def97 584 struct be_tx_obj *txo;
ab1594e9
SP
585 u64 pkts, bytes;
586 unsigned int start;
3abcdeda 587 int i;
6b7c5b94 588
3abcdeda 589 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
57a7744e 592 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 595 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
601 }
602
3c8def97 603 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
57a7744e 606 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
57a7744e 609 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
3c8def97 612 }
6b7c5b94
SP
613
614 /* bad pkts received */
ab1594e9 615 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
ab1594e9 624 drvs->rx_dropped_runt;
68110868 625
6b7c5b94 626 /* detailed rx errors */
ab1594e9 627 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
68110868 630
ab1594e9 631 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
632
633 /* frame alignment errors */
ab1594e9 634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 635
6b7c5b94
SP
636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
ab1594e9 641 return stats;
6b7c5b94
SP
642}
643
b236916a 644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 645{
6b7c5b94
SP
646 struct net_device *netdev = adapter->netdev;
647
b236916a 648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 649 netif_carrier_off(netdev);
b236916a 650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 651 }
b236916a 652
bdce2ad7 653 if (link_status)
b236916a
AK
654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
6b7c5b94
SP
657}
658
3c8def97 659static void be_tx_stats_update(struct be_tx_obj *txo,
748b539a
SP
660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
6b7c5b94 662{
3c8def97
SP
663 struct be_tx_stats *stats = tx_stats(txo);
664
ab1594e9 665 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 670 if (stopped)
ac124ff9 671 stats->tx_stops++;
ab1594e9 672 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
748b539a 677 bool *dummy)
6b7c5b94 678{
ebc8d2ab
DM
679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
6b7c5b94
SP
683 /* to account for hdr wrb */
684 cnt++;
fe6d2a38
SP
685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
6b7c5b94
SP
688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
fe6d2a38 691 }
6b7c5b94
SP
692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 701 wrb->rsvd0 = 0;
6b7c5b94
SP
702}
703
1ded132d 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 705 struct sk_buff *skb)
1ded132d
AK
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
c9c47142
SP
720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
cc4ce020 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
748b539a
SP
734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
6b7c5b94 736{
c9c47142 737 u16 vlan_tag, proto;
cc4ce020 738
6b7c5b94
SP
739 memset(hdr, 0, sizeof(*hdr));
740
741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
742
49e4b847 743 if (skb_is_gso(skb)) {
6b7c5b94
SP
744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
746 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 747 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 748 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94 749 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142
SP
750 if (skb->encapsulation) {
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
752 proto = skb_inner_ip_proto(skb);
753 } else {
754 proto = skb_ip_proto(skb);
755 }
756 if (proto == IPPROTO_TCP)
6b7c5b94 757 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
c9c47142 758 else if (proto == IPPROTO_UDP)
6b7c5b94
SP
759 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
760 }
761
4c5102f9 762 if (vlan_tx_tag_present(skb)) {
6b7c5b94 763 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 764 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 765 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
766 }
767
bc0c3405
AK
768 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
769 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 770 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
771 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
772 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
773}
774
2b7bcebf 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 776 bool unmap_single)
7101e111
SP
777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 783 if (wrb->frag_len) {
7101e111 784 if (unmap_single)
2b7bcebf
IV
785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
7101e111 787 else
2b7bcebf 788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
789 }
790}
6b7c5b94 791
3c8def97 792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
748b539a
SP
793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
794 bool skip_hw_vlan)
6b7c5b94 795{
7101e111
SP
796 dma_addr_t busaddr;
797 int i, copied = 0;
2b7bcebf 798 struct device *dev = &adapter->pdev->dev;
6b7c5b94 799 struct sk_buff *first_skb = skb;
6b7c5b94
SP
800 struct be_eth_wrb *wrb;
801 struct be_eth_hdr_wrb *hdr;
7101e111
SP
802 bool map_single = false;
803 u16 map_head;
6b7c5b94 804
6b7c5b94
SP
805 hdr = queue_head_node(txq);
806 queue_head_inc(txq);
7101e111 807 map_head = txq->head;
6b7c5b94 808
ebc8d2ab 809 if (skb->len > skb->data_len) {
e743d313 810 int len = skb_headlen(skb);
2b7bcebf
IV
811 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
812 if (dma_mapping_error(dev, busaddr))
7101e111
SP
813 goto dma_err;
814 map_single = true;
ebc8d2ab
DM
815 wrb = queue_head_node(txq);
816 wrb_fill(wrb, busaddr, len);
817 be_dws_cpu_to_le(wrb, sizeof(*wrb));
818 queue_head_inc(txq);
819 copied += len;
820 }
6b7c5b94 821
ebc8d2ab 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
748b539a 823 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
b061b39e 824 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 825 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 826 if (dma_mapping_error(dev, busaddr))
7101e111 827 goto dma_err;
ebc8d2ab 828 wrb = queue_head_node(txq);
9e903e08 829 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
830 be_dws_cpu_to_le(wrb, sizeof(*wrb));
831 queue_head_inc(txq);
9e903e08 832 copied += skb_frag_size(frag);
6b7c5b94
SP
833 }
834
835 if (dummy_wrb) {
836 wrb = queue_head_node(txq);
837 wrb_fill(wrb, 0, 0);
838 be_dws_cpu_to_le(wrb, sizeof(*wrb));
839 queue_head_inc(txq);
840 }
841
bc0c3405 842 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
843 be_dws_cpu_to_le(hdr, sizeof(*hdr));
844
845 return copied;
7101e111
SP
846dma_err:
847 txq->head = map_head;
848 while (copied) {
849 wrb = queue_head_node(txq);
2b7bcebf 850 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
851 map_single = false;
852 copied -= wrb->frag_len;
853 queue_head_inc(txq);
854 }
855 return 0;
6b7c5b94
SP
856}
857
/* Manually insert a VLAN tag (and, in QnQ mode, the outer tag) into the
 * packet data instead of letting the HW do it.  Used as a workaround for
 * ASIC bugs where HW VLAN insertion corrupts or stalls on certain pkts.
 * May set *skip_hw_vlan so the WRB header tells the F/W not to insert
 * a tag a second time.  Returns the (possibly reallocated) skb, or NULL
 * if the un-share/tag-insert failed (caller must treat NULL as a drop).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* The tag insertion below writes into the pkt; un-share first */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now inline in the frame; clear the meta-data tag
		 * so the HW does not insert it again
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
bc0c3405
AK
901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
748b539a 928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 929{
ee9c799c 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
931}
932
/* Apply BE-x/Lancer TX errata fixups to @skb before WRB creation:
 * trim bogus HW padding, and insert VLAN tags in SW where HW insertion
 * is known to corrupt checksums or lock up the ASIC.  Returns the
 * (possibly reallocated) skb, or NULL when the pkt was dropped/lost;
 * on NULL the skb has already been freed (or consumed) here.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* Trim the pad so the frame length matches ip->tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
ec495fac
VV
1001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply errata workarounds, build the WRBs for
 * the skb on the selected TX queue and ring the doorbell.  Always
 * returns NETDEV_TX_OK; failed pkts are dropped and counted in
 * tx_drv_drops rather than returned to the stack.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* saved for rollback and sent_skb slot */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed/consumed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
748b539a 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94 1078 dev_info(&adapter->pdev->dev,
748b539a
SP
1079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
748b539a 1085 netdev->mtu, new_mtu);
6b7c5b94
SP
1086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Pushes the driver's vid bitmap to the HW VLAN filter; falls back to
 * VLAN-promiscuous mode when the filter cannot hold them, and restores
 * filtering (clearing promisc) once it can again.  Returns 0 or a
 * negative/firmware error code.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc mode; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
80d5c368 1148static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1151 int status = 0;
6b7c5b94 1152
a85e9986
PR
1153 /* Packets with VID 0 are always received by Lancer by default */
1154 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1155 return status;
1156
f6cbd364 1157 if (test_bit(vid, adapter->vids))
48291c22 1158 return status;
a85e9986 1159
f6cbd364 1160 set_bit(vid, adapter->vids);
a6b74e01 1161 adapter->vlans_added++;
8e586137 1162
a6b74e01
SK
1163 status = be_vid_config(adapter);
1164 if (status) {
1165 adapter->vlans_added--;
f6cbd364 1166 clear_bit(vid, adapter->vids);
a6b74e01 1167 }
48291c22 1168
80817cbf 1169 return status;
6b7c5b94
SP
1170}
1171
80d5c368 1172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
a85e9986
PR
1176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1178 return 0;
a85e9986 1179
f6cbd364 1180 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
6b7c5b94
SP
1184}
1185
7ad09458
S
1186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
a0794885 1189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
7ad09458
S
1190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: sync promiscuous/UC/MC filter state from the
 * netdev flags and address lists into the firmware.  Falls back to
 * (mcast-)promiscuous when the requested lists exceed HW capacity.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-apply the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush the stale UC MAC entries before re-adding */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			/* Too many UC addrs for HW filtering: go promisc */
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed OK; leave mcast-promisc if it was on */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
ba343c77
SB
1261static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1265 int status;
1266
11ac75ed 1267 if (!sriov_enabled(adapter))
ba343c77
SB
1268 return -EPERM;
1269
11ac75ed 1270 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1271 return -EINVAL;
1272
3c31aaf3
VV
1273 /* Proceed further only if user provided MAC is different
1274 * from active MAC
1275 */
1276 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1277 return 0;
1278
3175d8c2
SP
1279 if (BEx_chip(adapter)) {
1280 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1281 vf + 1);
ba343c77 1282
11ac75ed
SP
1283 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1284 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1285 } else {
1286 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1287 vf + 1);
590c391d
PR
1288 }
1289
abccf23e
KA
1290 if (status) {
1291 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1292 mac, vf, status);
1293 return be_cmd_status(status);
1294 }
64600ea5 1295
abccf23e
KA
1296 ether_addr_copy(vf_cfg->mac_addr, mac);
1297
1298 return 0;
ba343c77
SB
1299}
1300
64600ea5 1301static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1302 struct ifla_vf_info *vi)
64600ea5
AK
1303{
1304 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1305 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1306
11ac75ed 1307 if (!sriov_enabled(adapter))
64600ea5
AK
1308 return -EPERM;
1309
11ac75ed 1310 if (vf >= adapter->num_vfs)
64600ea5
AK
1311 return -EINVAL;
1312
1313 vi->vf = vf;
ed616689
SC
1314 vi->max_tx_rate = vf_cfg->tx_rate;
1315 vi->min_tx_rate = 0;
a60b3a13
AK
1316 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1317 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1318 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1319 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
64600ea5
AK
1320
1321 return 0;
1322}
1323
/* ndo_set_vf_vlan handler: configure (or reset, when vlan==0 && qos==0)
 * transparent VLAN tagging for VF @vf via the hybrid-switch config.
 * Returns 0, -EPERM/-EINVAL on validation failure, or a translated
 * firmware error.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Combine priority bits with the VID into one tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the f/w call if the tag is already in effect */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after the firmware accepted it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1358
/* ndo_set_vf_rate handler: set the max TX rate (Mbps) of VF @vf.
 * min_tx_rate is unsupported and must be 0; max_tx_rate == 0 clears the
 * limit.  A non-zero rate is validated against the current link speed
 * (and, on Skyhawk, must be a multiple of 1% of link speed).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 == no limit: skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the accepted rate for be_get_vf_config() reporting */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
bdce2ad7
SR
1420static int be_set_vf_link_state(struct net_device *netdev, int vf,
1421 int link_state)
1422{
1423 struct be_adapter *adapter = netdev_priv(netdev);
1424 int status;
1425
1426 if (!sriov_enabled(adapter))
1427 return -EPERM;
1428
1429 if (vf >= adapter->num_vfs)
1430 return -EINVAL;
1431
1432 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1433 if (status) {
1434 dev_err(&adapter->pdev->dev,
1435 "Link state change on VF %d failed: %#x\n", vf, status);
1436 return be_cmd_status(status);
1437 }
bdce2ad7 1438
abccf23e
KA
1439 adapter->vf_cfg[vf].plink_tracking = link_state;
1440
1441 return 0;
bdce2ad7 1442}
e1d18735 1443
2632bafd
SP
1444static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1445 ulong now)
6b7c5b94 1446{
2632bafd
SP
1447 aic->rx_pkts_prev = rx_pkts;
1448 aic->tx_reqs_prev = tx_pkts;
1449 aic->jiffies = now;
1450}
ac124ff9 1451
/* Adaptive interrupt coalescing: for each event queue, derive a new EQ
 * delay from the RX+TX packet rate since the last sample and push all
 * changed delays to the firmware in one command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: fall back to the ethtool-set delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit counters under the u64_stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts/sec over the sample window -> EQ delay heuristic */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Only queue a f/w update when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1518
3abcdeda 1519static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1520 struct be_rx_compl_info *rxcp)
4097f663 1521{
ac124ff9 1522 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1523
ab1594e9 1524 u64_stats_update_begin(&stats->sync);
3abcdeda 1525 stats->rx_compl++;
2e588f84 1526 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1527 stats->rx_pkts++;
2e588f84 1528 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1529 stats->rx_mcast_pkts++;
2e588f84 1530 if (rxcp->err)
ac124ff9 1531 stats->rx_compl_err++;
ab1594e9 1532 u64_stats_update_end(&stats->sync);
4097f663
SP
1533}
1534
2e588f84 1535static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1536{
19fad86f 1537 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1538 * Also ignore ipcksm for ipv6 pkts
1539 */
2e588f84 1540 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1541 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1542}
1543
/* Consume the page-info slot at the RX queue tail and return it.
 * The last fragment of a page gets a full DMA unmap; earlier fragments
 * only need a CPU sync since the page mapping is still live.
 * Advances the queue tail and drops the 'used' count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Final frag on this page: tear down the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared: just make this frag CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1569
1570/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1571static void be_rx_compl_discard(struct be_rx_obj *rxo,
1572 struct be_rx_compl_info *rxcp)
6b7c5b94 1573{
6b7c5b94 1574 struct be_rx_page_info *page_info;
2e588f84 1575 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1576
e80d9da6 1577 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1578 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1579 put_page(page_info->page);
1580 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1581 }
1582}
1583
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is copied into the skb linear area (entirely for
 * tiny pkts, header-only otherwise); remaining fragments are attached
 * as page frags, coalescing consecutive frags that live in the same
 * physical page into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; the rest stays in the
		 * page and becomes frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag pkt: the completion must agree */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1658
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received fragments, fill in checksum/
 * hash/VLAN metadata and hand it to the stack.  On skb-alloc failure
 * the completion's fragments are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1694
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * build a frag-only skb via napi_get_frags(), coalescing same-page
 * fragments into single slots, set the metadata and pass the pkt to
 * GRO.  Falls back to discarding the completion when no skb is
 * available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration opens frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for pkts that passed HW csum checks */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1752
/* Decode a v1-format RX completion descriptor into the chip-agnostic
 * rxcp structure.  VLAN fields are extracted only when the completion
 * flags the pkt as VLAN-tagged.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
1784
10ef9ab4
SP
1785static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1786 struct be_rx_compl_info *rxcp)
2e588f84
SP
1787{
1788 rxcp->pkt_size =
1789 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1790 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1791 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1792 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1793 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1794 rxcp->ip_csum =
1795 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1796 rxcp->l4_csum =
1797 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1798 rxcp->ipv6 =
1799 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1800 rxcp->num_rcvd =
1801 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1802 rxcp->pkt_type =
1803 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1804 rxcp->rss_hash =
c297977e 1805 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1806 if (rxcp->vlanf) {
f93f160b 1807 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f 1808 compl);
748b539a
SP
1809 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1810 vlan_tag, compl);
15d72184 1811 }
12004ae9 1812 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1813 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1814 ip_frag, compl);
2e588f84
SP
1815}
1816
/* Fetch and parse the next valid Rx completion from the CQ.
 * Returns a pointer to the per-rxo cached be_rx_compl_info, or NULL if
 * no new completion is posted. Consumes (invalidates) the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the compl body */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag when it is not a vlan the host asked for */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1861
1829b086 1862static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1863{
6b7c5b94 1864 u32 order = get_order(size);
1829b086 1865
6b7c5b94 1866 if (order > 0)
1829b086
ED
1867 gfp |= __GFP_COMP;
1868 return alloc_pages(gfp, order);
6b7c5b94
SP
1869}
1870
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until the RXQ is full (slot already has a page) or
	 * MAX_RX_POST frags have been queued.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page; map it for DMA once and
			 * carve rx_frag_size chunks out of it below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag of the current big page; take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the Rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Last frag of the page carries the page's DMA address
			 * so the whole page can be unmapped when it completes.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1949
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none.
 * Converts the entry to CPU byte order and consumes (invalidates) it.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the compl body */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not seen again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1965
/* Reclaim one completed Tx skb: unmap all of its WRBs (from the TXQ tail
 * up to and including @last_index) and free the skb.
 * Returns the number of WRBs consumed (including the header WRB).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data WRB may map the skb linear header area;
		 * unmap it only once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1997
10ef9ab4
SP
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt read before clearing the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2017
10ef9ab4
SP
2018/* Leaves the EQ is disarmed state */
2019static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2020{
10ef9ab4 2021 int num = events_get(eqo);
859b1e4e 2022
10ef9ab4 2023 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2024}
2025
/* Drain an RX CQ at teardown and release all posted Rx buffers. */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is dead */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2074
/* Drain all TX completion queues at teardown, then forcibly free any
 * posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2134
10ef9ab4
SP
2135static void be_evt_queues_destroy(struct be_adapter *adapter)
2136{
2137 struct be_eq_obj *eqo;
2138 int i;
2139
2140 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2141 if (eqo->q.created) {
2142 be_eq_clean(eqo);
10ef9ab4 2143 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2144 napi_hash_del(&eqo->napi);
68d7bdcb 2145 netif_napi_del(&eqo->napi);
19d59aa7 2146 }
10ef9ab4
SP
2147 be_queue_free(adapter, &eqo->q);
2148 }
2149}
2150
/* Allocate and create the event queues, registering a NAPI context for
 * each. Returns 0 on success or a negative errno on failure; partially
 * created queues are cleaned up by be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per IRQ vector, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Adaptive interrupt coalescing on by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2184
5fb379ee
SP
2185static void be_mcc_queues_destroy(struct be_adapter *adapter)
2186{
2187 struct be_queue_info *q;
5fb379ee 2188
8788fdc2 2189 q = &adapter->mcc_obj.q;
5fb379ee 2190 if (q->created)
8788fdc2 2191 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2192 be_queue_free(adapter, q);
2193
8788fdc2 2194 q = &adapter->mcc_obj.cq;
5fb379ee 2195 if (q->created)
8788fdc2 2196 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2197 be_queue_free(adapter, q);
2198}
2199
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2232
6b7c5b94
SP
2233static void be_tx_queues_destroy(struct be_adapter *adapter)
2234{
2235 struct be_queue_info *q;
3c8def97
SP
2236 struct be_tx_obj *txo;
2237 u8 i;
6b7c5b94 2238
3c8def97
SP
2239 for_all_tx_queues(adapter, txo, i) {
2240 q = &txo->q;
2241 if (q->created)
2242 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2243 be_queue_free(adapter, q);
6b7c5b94 2244
3c8def97
SP
2245 q = &txo->cq;
2246 if (q->created)
2247 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248 be_queue_free(adapter, q);
2249 }
6b7c5b94
SP
2250}
2251
/* Create the TX completion queues and WRB queues.
 * Returns 0 on success or a negative status from the first failing step;
 * partially created queues are cleaned up by be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2292
10ef9ab4 2293static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2294{
2295 struct be_queue_info *q;
3abcdeda
SP
2296 struct be_rx_obj *rxo;
2297 int i;
2298
2299 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2300 q = &rxo->cq;
2301 if (q->created)
2302 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2303 be_queue_free(adapter, q);
ac6a0c4a
SP
2304 }
2305}
2306
/* Decide the number of Rx rings and create an RX CQ for each.
 * Returns 0 on success or a negative status from the first failing step.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute the CQs round-robin over the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2343
6b7c5b94
SP
/* Legacy INTx interrupt handler; services only EQ0. */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2375
/* MSI-X interrupt handler: ack the interrupt (EQ left unarmed) and hand
 * the rest of the work to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2384
2e588f84 2385static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2386{
e38b1706 2387 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2388}
2389
/* Process up to @budget Rx completions on @rxo and return the number
 * consumed. @polling distinguishes NAPI context from busy-poll context;
 * GRO is skipped when busy polling.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2445
10ef9ab4
SP
2446static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2447 int budget, int idx)
6b7c5b94 2448{
6b7c5b94 2449 struct be_eth_tx_compl *txcp;
10ef9ab4 2450 int num_wrbs = 0, work_done;
3c8def97 2451
10ef9ab4
SP
2452 for (work_done = 0; work_done < budget; work_done++) {
2453 txcp = be_tx_compl_get(&txo->cq);
2454 if (!txcp)
2455 break;
2456 num_wrbs += be_tx_compl_process(adapter, txo,
748b539a
SP
2457 AMAP_GET_BITS(struct
2458 amap_eth_tx_compl,
2459 wrb_index, txcp));
10ef9ab4 2460 }
6b7c5b94 2461
10ef9ab4
SP
2462 if (work_done) {
2463 be_cq_notify(adapter, txo->cq.id, true, work_done);
2464 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2465
10ef9ab4
SP
2466 /* As Tx wrbs have been freed up, wake up netdev queue
2467 * if it was stopped due to lack of tx wrbs. */
2468 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2469 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2470 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2471 }
10ef9ab4
SP
2472
2473 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2474 tx_stats(txo)->tx_compl += work_done;
2475 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2476 }
10ef9ab4
SP
2477 return (work_done < budget); /* Done */
2478}
6b7c5b94 2479
/* NAPI poll handler: services the TXQs, RXQs and (for the MCC EQ) the
 * MCC queue attached to this EQ. Re-arms the EQ only when all work fit
 * within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the rings; stay in polling mode */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ and ack the counted events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2524
6384a4d0
SP
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency sockets) handler: poll a few Rx completions on
 * each RXQ of this EQ without waiting for an interrupt. Returns the work
 * done, or LL_FLUSH_BUSY when NAPI currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* Small fixed budget per busy-poll invocation */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2546
f67ef7ba 2547void be_detect_error(struct be_adapter *adapter)
7c185276 2548{
e1cfb67a
PR
2549 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2550 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2551 u32 i;
eb0eecc1
SK
2552 bool error_detected = false;
2553 struct device *dev = &adapter->pdev->dev;
2554 struct net_device *netdev = adapter->netdev;
7c185276 2555
d23e946c 2556 if (be_hw_error(adapter))
72f02485
SP
2557 return;
2558
e1cfb67a
PR
2559 if (lancer_chip(adapter)) {
2560 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2561 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2562 sliport_err1 = ioread32(adapter->db +
748b539a 2563 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2564 sliport_err2 = ioread32(adapter->db +
748b539a 2565 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2566 adapter->hw_error = true;
2567 /* Do not log error messages if its a FW reset */
2568 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2569 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2570 dev_info(dev, "Firmware update in progress\n");
2571 } else {
2572 error_detected = true;
2573 dev_err(dev, "Error detected in the card\n");
2574 dev_err(dev, "ERR: sliport status 0x%x\n",
2575 sliport_status);
2576 dev_err(dev, "ERR: sliport error1 0x%x\n",
2577 sliport_err1);
2578 dev_err(dev, "ERR: sliport error2 0x%x\n",
2579 sliport_err2);
2580 }
e1cfb67a
PR
2581 }
2582 } else {
2583 pci_read_config_dword(adapter->pdev,
748b539a 2584 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2585 pci_read_config_dword(adapter->pdev,
748b539a 2586 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2587 pci_read_config_dword(adapter->pdev,
748b539a 2588 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2589 pci_read_config_dword(adapter->pdev,
748b539a 2590 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2591
f67ef7ba
PR
2592 ue_lo = (ue_lo & ~ue_lo_mask);
2593 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2594
eb0eecc1
SK
2595 /* On certain platforms BE hardware can indicate spurious UEs.
2596 * Allow HW to stop working completely in case of a real UE.
2597 * Hence not setting the hw_error for UE detection.
2598 */
f67ef7ba 2599
eb0eecc1
SK
2600 if (ue_lo || ue_hi) {
2601 error_detected = true;
2602 dev_err(dev,
2603 "Unrecoverable Error detected in the adapter");
2604 dev_err(dev, "Please reboot server to recover");
2605 if (skyhawk_chip(adapter))
2606 adapter->hw_error = true;
2607 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2608 if (ue_lo & 1)
2609 dev_err(dev, "UE: %s bit set\n",
2610 ue_status_low_desc[i]);
2611 }
2612 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2613 if (ue_hi & 1)
2614 dev_err(dev, "UE: %s bit set\n",
2615 ue_status_hi_desc[i]);
2616 }
7c185276
AK
2617 }
2618 }
eb0eecc1
SK
2619 if (error_detected)
2620 netif_carrier_off(netdev);
7c185276
AK
2621}
2622
8d56ff11
SP
2623static void be_msix_disable(struct be_adapter *adapter)
2624{
ac6a0c4a 2625 if (msix_enabled(adapter)) {
8d56ff11 2626 pci_disable_msix(adapter->pdev);
ac6a0c4a 2627 adapter->num_msix_vec = 0;
68d7bdcb 2628 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2629 }
2630}
2631
/* Request MSI-X vectors and split them between NIC and RoCE usage.
 * Returns 0 on success. On failure, PFs fall back to INTx (return 0);
 * VFs (no INTx support) return the error so the probe fails.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, down to MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give half of the granted vectors to RoCE when supported */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2675
/* Return the Linux IRQ number backing @eqo's MSI-X slot. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
6b7c5b94 2681
b628bde2
SP
/* Request one MSI-X IRQ per event queue.
 * On failure, frees the IRQs already requested, disables MSI-X and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind the IRQs requested before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2705
/* Register interrupt handlers: MSI-X when enabled, otherwise fall back
 * to shared INTx on the first EQ (PFs only; VFs have no INTx).
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2733
/* Undo be_irq_register(): free the shared INTx line, or every per-EQ
 * MSI-X IRQ, depending on which mode was active. No-op if nothing was
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2756
/* Destroy every RX queue: ask FW to tear down each created queue, drain
 * whatever is still pending in its completion queue, then free the queue
 * memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* reap completions that arrived before destroy */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2772
889cd4b2
SP
/* ndo_stop: quiesce the interface. Disables NAPI/busy-poll and async MCC
 * processing, drains TX completions, destroys RX queues, removes the
 * extra unicast MACs, cleans the EQs and finally releases the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the secondary (uc-list) MACs; pmac_id[0] is the primary */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no IRQ handler is still running before cleaning EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2822
/* Allocate all RX queues and create them in FW (default queue first, as FW
 * requires), program the RSS indirection table and a random hash key when
 * more than the default queue exists, then post the initial RX buffers.
 * Returns 0 on success or an error code from allocation/FW.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling over the RSS queues */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* remember the key so ethtool can report it later */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2888
6b7c5b94
SP
/* ndo_open: create RX queues, register IRQs, arm all CQs and EQs, enable
 * NAPI/busy-poll and async MCC processing, report the current link state
 * and start the TX queues. On any failure, tear down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN offloads are supported only on skyhawk */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	/* NOTE: the specific error is masked; callers always see -EIO */
	be_close(adapter->netdev);
	return -EIO;
}
2938
71d8d1b5
AK
/* Enable or disable Wake-on-LAN (magic packet). Enabling programs the PCI
 * PM control register and the netdev MAC into FW; disabling programs a
 * zero MAC. Returns 0 on success or a negative error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* an all-zero MAC tells FW to disable magic-packet wake */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2978
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx adds a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3014
4c876616
SP
/* Read back each VF's active MAC from FW into its vf_cfg. Used when VFs
 * were already enabled before this PF driver instance loaded.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3031
/* Tear down SR-IOV state: disable the VFs on the PCI bus (skipped when
 * they are still attached to VMs), delete each VF's MAC and FW interface,
 * and release the vf_cfg array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the pmac entry; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3060
7707133c
SP
/* Destroy all HW queues in reverse order of creation: MCC, RX CQs, TX
 * queues, then the event queues they feed.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3068
68d7bdcb 3069static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3070{
191eb756
SP
3071 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3072 cancel_delayed_work_sync(&adapter->work);
3073 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3074 }
68d7bdcb
SP
3075}
3076
b05004ad 3077static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3078{
3079 int i;
3080
b05004ad
SK
3081 if (adapter->pmac_id) {
3082 for (i = 0; i < (adapter->uc_macs + 1); i++)
3083 be_cmd_pmac_del(adapter, adapter->if_handle,
3084 adapter->pmac_id[i], 0);
3085 adapter->uc_macs = 0;
3086
3087 kfree(adapter->pmac_id);
3088 adapter->pmac_id = NULL;
3089 }
3090}
3091
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the interface back to normal mode,
 * clear the VxLAN port in FW, and reset the driver's tracking fields.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
c9c47142 3106
b05004ad
SK
/* Undo be_setup(): stop the worker, disable SR-IOV, re-balance FW SR-IOV
 * resources, drop VxLAN offloads, delete MACs, destroy the FW interface
 * and all queues, and disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* allow be_close() to short-circuit until the next be_setup() */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3135
/* Create a FW interface (if_handle) for every VF, narrowing the enabled
 * flags to the capabilities reported by the per-VF FW profile when one
 * exists (non-BE3 chips only).
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3167
/* Allocate the per-VF config array and mark every interface handle and
 * pmac id as unset (-1). Returns -ENOMEM on allocation failure.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3184
f9449ab7
SP
/* Bring up SR-IOV: create (or re-discover) per-VF interfaces and MACs,
 * grant filter-management privileges, set QoS/link defaults for fresh VFs,
 * and finally enable the VFs on the PCI bus. On failure, everything is
 * unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs pre-exist (e.g. PF driver reload): query the handles
		 * and MACs FW already has instead of recreating them.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3259
f93f160b
VV
/* Converting function_mode bits on BE3 to SH mc_type enums */

static u8 be_convert_mc_type(u32 function_mode)
{
	/* order matters: VNIC+QNQ must be tested before plain QNQ/VNIC */
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}
3275
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs have more unicast MAC filter entries than VFs */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	/* SRIOV-capable PFs get a different event-queue budget */
	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3333
30128031
SP
/* Reset per-setup soft state to defaults before querying FW config. */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	/* PFs start with full command privileges; VFs with the minimum */
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
3346
bec84e6b
VV
/* Decide how many VFs to enable: keep an already-enabled VF count, or
 * clamp the num_vfs module parameter to the HW/FW maximum. Stores the
 * PF-pool resources in adapter->pool_res. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3391
/* Populate adapter->res with per-function queue/MAC/VLAN limits: computed
 * locally on BE2/BE3 chips, queried from FW on everything else.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3428
d3d18312
SP
/* Determine the SR-IOV configuration and, where FW allows it, redistribute
 * the PF-pool resources across the requested number of VFs. Errors are
 * logged but not propagated; SR-IOV simply stays disabled/unoptimized.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3457
39f1d94d
SP
/* Query FW configuration and active profile, set up SR-IOV where
 * applicable, read resource limits, allocate the pmac_id table sized to
 * the max unicast MAC count, and clamp the configured queue count.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* BE2 has no SR-IOV configuration support */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3491
95046b92
SP
/* Program the interface MAC: read the permanent MAC from FW when the
 * netdev has none yet, otherwise re-program the current dev_addr (the HW
 * may have been reset). On BE3-R VFs the PF owns the initial MAC, so no
 * pmac entry is added there.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3515
68d7bdcb
SP
/* Arm the 1-second periodic worker and record that it is scheduled so
 * be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3521
/* Create all HW queues (EQs, TX, RX CQs, MCC) and publish the real queue
 * counts to the net stack. Callers hold rtnl_lock around this for the
 * netif_set_real_num_*_queues() calls (see be_setup()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3556
68d7bdcb
SP
/* Rebuild all queues after a resource change: close the device if it is
 * running, tear down the queues (and MSI-X, unless vectors are shared with
 * RoCE and cannot be re-programmed), recreate everything, and reopen.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3592
7707133c
SP
/* Main initialization path: query FW config, enable MSI-X, create the FW
 * interface and all queues, program MAC/VLAN/flow-control state, and
 * optionally bring up SR-IOV VFs. On failure, unwinds via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	/* enable only the flags this function is actually capable of */
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);

	/* old BE2 FW is known to have interrupt problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* sync FW flow-control state with the driver's settings */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3674
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: re-arm every event queue and kick its NAPI handler
 * so RX/TX completions are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* Fix: dropped the redundant bare `return;` that ended the original
	 * void function.
	 */
}
#endif
3690
/* Cookie marking the start of a flash_section_info header inside a UFI
 * image. Stored as 2x16 bytes and compared with memcmp() over the full
 * sizeof(flash_cookie), i.e. including the NUL padding of the first half
 * (see get_fsec_info()).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3692
306f1348
SP
3693static bool phy_flashing_required(struct be_adapter *adapter)
3694{
42f11cf2
AK
3695 return (adapter->phy.phy_type == TN_8022 &&
3696 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3697}
3698
c165541e
PR
3699static bool is_comp_in_ufi(struct be_adapter *adapter,
3700 struct flash_section_info *fsec, int type)
3701{
3702 int i = 0, img_type = 0;
3703 struct flash_section_info_g2 *fsec_g2 = NULL;
3704
ca34fe38 3705 if (BE2_chip(adapter))
c165541e
PR
3706 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3707
3708 for (i = 0; i < MAX_FLASH_COMP; i++) {
3709 if (fsec_g2)
3710 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3711 else
3712 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3713
3714 if (img_type == type)
3715 return true;
3716 }
3717 return false;
3718
3719}
3720
4188e7df 3721static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3722 int header_size,
3723 const struct firmware *fw)
c165541e
PR
3724{
3725 struct flash_section_info *fsec = NULL;
3726 const u8 *p = fw->data;
3727
3728 p += header_size;
3729 while (p < (fw->data + fw->size)) {
3730 fsec = (struct flash_section_info *)p;
3731 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3732 return fsec;
3733 p += 32;
3734 }
3735 return NULL;
3736}
3737
96c9b2e4
VV
3738static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3739 u32 img_offset, u32 img_size, int hdr_size,
3740 u16 img_optype, bool *crc_match)
3741{
3742 u32 crc_offset;
3743 int status;
3744 u8 crc[4];
3745
3746 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3747 if (status)
3748 return status;
3749
3750 crc_offset = hdr_size + img_offset + img_size - 4;
3751
3752 /* Skip flashing, if crc of flashed region matches */
3753 if (!memcmp(crc, p + crc_offset, 4))
3754 *crc_match = true;
3755 else
3756 *crc_match = false;
3757
3758 return status;
3759}
3760
/* Write one image to flash in 32KB chunks.  Every chunk except the last
 * is sent with a SAVE op; the final chunk uses a FLASH op, which commits
 * the image.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* last chunk: commit what was saved so far */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		/* FW that doesn't support PHY flashing rejects the op;
		 * treat that as non-fatal and stop */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3798
/* For BE2, BE3 and BE3-R */
/* Walk the per-generation component table, and flash every component that
 * is present in the UFI's flash section directory.  Boot code (redboot)
 * is skipped when its CRC already matches what is on flash.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* { flash offset, op type, max size, image type } per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs at least FW version 3.102.148.0 on the card */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			/* boot code unchanged; no need to reflash it */
			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* bounds-check the component against the FW file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3915
96c9b2e4
VV
3916static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3917{
3918 u32 img_type = le32_to_cpu(fsec_entry.type);
3919 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3920
3921 if (img_optype != 0xFFFF)
3922 return img_optype;
3923
3924 switch (img_type) {
3925 case IMAGE_FIRMWARE_iSCSI:
3926 img_optype = OPTYPE_ISCSI_ACTIVE;
3927 break;
3928 case IMAGE_BOOT_CODE:
3929 img_optype = OPTYPE_REDBOOT;
3930 break;
3931 case IMAGE_OPTION_ROM_ISCSI:
3932 img_optype = OPTYPE_BIOS;
3933 break;
3934 case IMAGE_OPTION_ROM_PXE:
3935 img_optype = OPTYPE_PXE_BIOS;
3936 break;
3937 case IMAGE_OPTION_ROM_FCoE:
3938 img_optype = OPTYPE_FCOE_BIOS;
3939 break;
3940 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3941 img_optype = OPTYPE_ISCSI_BACKUP;
3942 break;
3943 case IMAGE_NCSI:
3944 img_optype = OPTYPE_NCSI_FW;
3945 break;
3946 case IMAGE_FLASHISM_JUMPVECTOR:
3947 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3948 break;
3949 case IMAGE_FIRMWARE_PHY:
3950 img_optype = OPTYPE_SH_PHY_FW;
3951 break;
3952 case IMAGE_REDBOOT_DIR:
3953 img_optype = OPTYPE_REDBOOT_DIR;
3954 break;
3955 case IMAGE_REDBOOT_CONFIG:
3956 img_optype = OPTYPE_REDBOOT_CONFIG;
3957 break;
3958 case IMAGE_UFI_DIR:
3959 img_optype = OPTYPE_UFI_DIR;
3960 break;
3961 default:
3962 break;
3963 }
3964
3965 return img_optype;
3966}
3967
/* Flash a Skyhawk UFI: walk the flash section directory, skip sections
 * whose on-flash CRC already matches the image, and write the rest.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* 0xFFFF after mapping means an unrecognized section */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* bounds-check the section against the FW file size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4049
/* Download FW to a Lancer adapter: stream the image in 32KB chunks via
 * write_object cmds, commit with a zero-length write, then reset the
 * function if the new FW requires it to become active.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the cmd header followed by one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by what FW actually consumed, not chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4146
ca34fe38
SP
4147#define UFI_TYPE2 2
4148#define UFI_TYPE3 3
0ad3157e 4149#define UFI_TYPE3R 10
ca34fe38
SP
4150#define UFI_TYPE4 4
4151static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4152 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4153{
ddf1169f 4154 if (!fhdr)
773a2d7c
PR
4155 goto be_get_ufi_exit;
4156
ca34fe38
SP
4157 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4158 return UFI_TYPE4;
0ad3157e
VV
4159 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4160 if (fhdr->asic_type_rev == 0x10)
4161 return UFI_TYPE3R;
4162 else
4163 return UFI_TYPE3;
4164 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4165 return UFI_TYPE2;
773a2d7c
PR
4166
4167be_get_ufi_exit:
4168 dev_err(&adapter->pdev->dev,
4169 "UFI and Interface are not compatible for flashing\n");
4170 return -1;
4171}
4172
485bf569
SN
/* Flash a BE2/BE3/Skyhawk UFI.  The UFI type is validated against the
 * chip, then every image section with imageid == 1 is flashed via the
 * chip-specific routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Type-2 UFIs are flashed with no per-image headers (num_of_images
	 * passed as 0) */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4241
/* Fetch the FW image @fw_file via request_firmware() and flash it.
 * Returns -ENETDOWN when the interface is down; refreshes the cached FW
 * version string on success.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Lancer uses write_object based download; other chips use the
	 * flashrom-style UFI path */
	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
4271
748b539a 4272static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4273{
4274 struct be_adapter *adapter = netdev_priv(dev);
4275 struct nlattr *attr, *br_spec;
4276 int rem;
4277 int status = 0;
4278 u16 mode = 0;
4279
4280 if (!sriov_enabled(adapter))
4281 return -EOPNOTSUPP;
4282
4283 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4284
4285 nla_for_each_nested(attr, br_spec, rem) {
4286 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4287 continue;
4288
4289 mode = nla_get_u16(attr);
4290 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4291 return -EINVAL;
4292
4293 status = be_cmd_set_hsw_config(adapter, 0, 0,
4294 adapter->if_handle,
4295 mode == BRIDGE_MODE_VEPA ?
4296 PORT_FWD_TYPE_VEPA :
4297 PORT_FWD_TYPE_VEB);
4298 if (status)
4299 goto err;
4300
4301 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4302 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4303
4304 return status;
4305 }
4306err:
4307 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4308 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4309
4310 return status;
4311}
4312
4313static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4314 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4315{
4316 struct be_adapter *adapter = netdev_priv(dev);
4317 int status = 0;
4318 u8 hsw_mode;
4319
4320 if (!sriov_enabled(adapter))
4321 return 0;
4322
4323 /* BE and Lancer chips support VEB mode only */
4324 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4325 hsw_mode = PORT_FWD_TYPE_VEB;
4326 } else {
4327 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4328 adapter->if_handle, &hsw_mode);
4329 if (status)
4330 return 0;
4331 }
4332
4333 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4334 hsw_mode == PORT_FWD_TYPE_VEPA ?
4335 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4336}
4337
#ifdef CONFIG_BE2NET_VXLAN
/* Enable VxLAN offloads for the given UDP port.  The HW supports offloads
 * for only one UDP port at a time; a second add request is rejected.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* VxLAN offloads are not available on Lancer or BE2/BE3 */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	/* roll back to the non-tunneled state on any failure */
	be_disable_vxlan_offloads(adapter);
	return;
}

/* Tear down VxLAN offloads if @port is the currently-offloaded port */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		return;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
}
#endif
c9c47142 4398
/* netdev callback table for be2net interfaces */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4428
/* Populate netdev feature flags and attach the driver's ops/ethtool ops */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk also offloads csum/TSO for encapsulated (UDP tunnel)
	 * traffic */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4461
4462static void be_unmap_pci_bars(struct be_adapter *adapter)
4463{
c5b3ad4c
SP
4464 if (adapter->csr)
4465 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4466 if (adapter->db)
ce66f781 4467 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4468}
4469
ce66f781
SP
/* Doorbell BAR number: BAR 0 on Lancer and on VFs, BAR 4 otherwise */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4477
4478static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4479{
dbf0f2a7 4480 if (skyhawk_chip(adapter)) {
ce66f781
SP
4481 adapter->roce_db.size = 4096;
4482 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4483 db_bar(adapter));
4484 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4485 db_bar(adapter));
4486 }
045508a8 4487 return 0;
6b7c5b94
SP
4488}
4489
/* Map the CSR (BE2/BE3 PFs only) and doorbell BARs, and record the RoCE
 * doorbell region where applicable.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* The CSR BAR (BAR 2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4512
6b7c5b94
SP
4513static void be_ctrl_cleanup(struct be_adapter *adapter)
4514{
8788fdc2 4515 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4516
4517 be_unmap_pci_bars(adapter);
4518
4519 if (mem->va)
2b7bcebf
IV
4520 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4521 mem->dma);
e7b909a6 4522
5b8821b7 4523 mem = &adapter->rx_filter;
e7b909a6 4524 if (mem->va)
2b7bcebf
IV
4525 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4526 mem->dma);
6b7c5b94
SP
4527}
4528
6b7c5b94
SP
/* One-time control path setup: read SLI identity from PCI config space,
 * map BARs, allocate the MCC mailbox and RX-filter DMA buffers, and
 * initialize the cmd locks.  Cleaned up by be_ctrl_cleanup().
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* cache SLI family and VF/PF status from config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4587
4588static void be_stats_cleanup(struct be_adapter *adapter)
4589{
3abcdeda 4590 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4591
4592 if (cmd->va)
2b7bcebf
IV
4593 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4594 cmd->va, cmd->dma);
6b7c5b94
SP
4595}
4596
4597static int be_stats_init(struct be_adapter *adapter)
4598{
3abcdeda 4599 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4600
ca34fe38
SP
4601 if (lancer_chip(adapter))
4602 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4603 else if (BE2_chip(adapter))
89a88ab8 4604 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4605 else if (BE3_chip(adapter))
ca34fe38 4606 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4607 else
4608 /* ALL non-BE ASICs */
4609 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4610
ede23fa8
JP
4611 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4612 GFP_KERNEL);
ddf1169f 4613 if (!cmd->va)
6b568689 4614 return -ENOMEM;
6b7c5b94
SP
4615 return 0;
4616}
4617
/* PCI remove callback: tear down RoCE, stop recovery work, unregister the
 * netdev and release all driver resources in reverse order of probe.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the periodic error-recovery work before tearing down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4648
39f1d94d 4649static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4650{
baaa08d1 4651 int status, level;
6b7c5b94 4652
9e1453c5
AK
4653 status = be_cmd_get_cntl_attributes(adapter);
4654 if (status)
4655 return status;
4656
7aeb2156
PR
4657 /* Must be a power of 2 or else MODULO will BUG_ON */
4658 adapter->be_get_temp_freq = 64;
4659
baaa08d1
VV
4660 if (BEx_chip(adapter)) {
4661 level = be_cmd_get_fw_log_level(adapter);
4662 adapter->msg_enable =
4663 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4664 }
941a77d5 4665
92bf14ab 4666 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4667 return 0;
6b7c5b94
SP
4668}
4669
/* Recover a Lancer adapter after an error: wait for the chip to report
 * ready, rebuild driver state (be_clear + be_setup), and reopen the
 * netdev if it was running.  Returns -EAGAIN while FW is still
 * provisioning resources so the caller can retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4706
4707static void be_func_recovery_task(struct work_struct *work)
4708{
4709 struct be_adapter *adapter =
4710 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4711 int status = 0;
d8110f62 4712
f67ef7ba 4713 be_detect_error(adapter);
d8110f62 4714
f67ef7ba 4715 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4716
f67ef7ba
PR
4717 rtnl_lock();
4718 netif_device_detach(adapter->netdev);
4719 rtnl_unlock();
d8110f62 4720
f67ef7ba 4721 status = lancer_recover_func(adapter);
f67ef7ba
PR
4722 if (!status)
4723 netif_device_attach(adapter->netdev);
d8110f62 4724 }
f67ef7ba 4725
01e5b2c4
SK
4726 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4727 * no need to attempt further recovery.
4728 */
4729 if (!status || status == -EAGAIN)
4730 schedule_delayed_work(&adapter->func_recovery_work,
4731 msecs_to_jiffies(1000));
d8110f62
PR
4732}
4733
4734static void be_worker(struct work_struct *work)
4735{
4736 struct be_adapter *adapter =
4737 container_of(work, struct be_adapter, work.work);
4738 struct be_rx_obj *rxo;
4739 int i;
4740
d8110f62
PR
4741 /* when interrupts are not yet enabled, just reap any pending
4742 * mcc completions */
4743 if (!netif_running(adapter->netdev)) {
072a9c48 4744 local_bh_disable();
10ef9ab4 4745 be_process_mcc(adapter);
072a9c48 4746 local_bh_enable();
d8110f62
PR
4747 goto reschedule;
4748 }
4749
4750 if (!adapter->stats_cmd_sent) {
4751 if (lancer_chip(adapter))
4752 lancer_cmd_get_pport_stats(adapter,
4753 &adapter->stats_cmd);
4754 else
4755 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4756 }
4757
d696b5e2
VV
4758 if (be_physfn(adapter) &&
4759 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4760 be_cmd_get_die_temperature(adapter);
4761
d8110f62 4762 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4763 /* Replenish RX-queues starved due to memory
4764 * allocation failures.
4765 */
4766 if (rxo->rx_post_starved)
d8110f62 4767 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4768 }
4769
2632bafd 4770 be_eqd_update(adapter);
10ef9ab4 4771
d8110f62
PR
4772reschedule:
4773 adapter->work_counter++;
4774 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4775}
4776
257a3feb 4777/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4778static bool be_reset_required(struct be_adapter *adapter)
4779{
257a3feb 4780 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4781}
4782
d379142b
SP
4783static char *mc_name(struct be_adapter *adapter)
4784{
f93f160b
VV
4785 char *str = ""; /* default */
4786
4787 switch (adapter->mc_type) {
4788 case UMC:
4789 str = "UMC";
4790 break;
4791 case FLEX10:
4792 str = "FLEX10";
4793 break;
4794 case vNIC1:
4795 str = "vNIC-1";
4796 break;
4797 case nPAR:
4798 str = "nPAR";
4799 break;
4800 case UFP:
4801 str = "UFP";
4802 break;
4803 case vNIC2:
4804 str = "vNIC-2";
4805 break;
4806 default:
4807 str = "";
4808 }
4809
4810 return str;
d379142b
SP
4811}
4812
/* Short label for log messages: physical vs virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4817
1dd06ae8 4818static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4819{
4820 int status = 0;
4821 struct be_adapter *adapter;
4822 struct net_device *netdev;
b4e32a71 4823 char port_name;
6b7c5b94
SP
4824
4825 status = pci_enable_device(pdev);
4826 if (status)
4827 goto do_none;
4828
4829 status = pci_request_regions(pdev, DRV_NAME);
4830 if (status)
4831 goto disable_dev;
4832 pci_set_master(pdev);
4833
7f640062 4834 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4835 if (!netdev) {
6b7c5b94
SP
4836 status = -ENOMEM;
4837 goto rel_reg;
4838 }
4839 adapter = netdev_priv(netdev);
4840 adapter->pdev = pdev;
4841 pci_set_drvdata(pdev, adapter);
4842 adapter->netdev = netdev;
2243e2e9 4843 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4844
4c15c243 4845 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4846 if (!status) {
4847 netdev->features |= NETIF_F_HIGHDMA;
4848 } else {
4c15c243 4849 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4850 if (status) {
4851 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4852 goto free_netdev;
4853 }
4854 }
4855
ea58c180
AK
4856 if (be_physfn(adapter)) {
4857 status = pci_enable_pcie_error_reporting(pdev);
4858 if (!status)
4859 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4860 }
d6b6d987 4861
6b7c5b94
SP
4862 status = be_ctrl_init(adapter);
4863 if (status)
39f1d94d 4864 goto free_netdev;
6b7c5b94 4865
2243e2e9 4866 /* sync up with fw's ready state */
ba343c77 4867 if (be_physfn(adapter)) {
bf99e50d 4868 status = be_fw_wait_ready(adapter);
ba343c77
SB
4869 if (status)
4870 goto ctrl_clean;
ba343c77 4871 }
6b7c5b94 4872
39f1d94d
SP
4873 if (be_reset_required(adapter)) {
4874 status = be_cmd_reset_function(adapter);
4875 if (status)
4876 goto ctrl_clean;
556ae191 4877
2d177be8
KA
4878 /* Wait for interrupts to quiesce after an FLR */
4879 msleep(100);
4880 }
8cef7a78
SK
4881
4882 /* Allow interrupts for other ULPs running on NIC function */
4883 be_intr_set(adapter, true);
10ef9ab4 4884
2d177be8
KA
4885 /* tell fw we're ready to fire cmds */
4886 status = be_cmd_fw_init(adapter);
4887 if (status)
4888 goto ctrl_clean;
4889
2243e2e9
SP
4890 status = be_stats_init(adapter);
4891 if (status)
4892 goto ctrl_clean;
4893
39f1d94d 4894 status = be_get_initial_config(adapter);
6b7c5b94
SP
4895 if (status)
4896 goto stats_clean;
6b7c5b94
SP
4897
4898 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4899 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4900 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4901
5fb379ee
SP
4902 status = be_setup(adapter);
4903 if (status)
55f5c3c5 4904 goto stats_clean;
2243e2e9 4905
3abcdeda 4906 be_netdev_init(netdev);
6b7c5b94
SP
4907 status = register_netdev(netdev);
4908 if (status != 0)
5fb379ee 4909 goto unsetup;
6b7c5b94 4910
045508a8
PP
4911 be_roce_dev_add(adapter);
4912
f67ef7ba
PR
4913 schedule_delayed_work(&adapter->func_recovery_work,
4914 msecs_to_jiffies(1000));
b4e32a71
PR
4915
4916 be_cmd_query_port_name(adapter, &port_name);
4917
d379142b
SP
4918 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4919 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4920
6b7c5b94
SP
4921 return 0;
4922
5fb379ee
SP
4923unsetup:
4924 be_clear(adapter);
6b7c5b94
SP
4925stats_clean:
4926 be_stats_cleanup(adapter);
4927ctrl_clean:
4928 be_ctrl_cleanup(adapter);
f9449ab7 4929free_netdev:
fe6d2a38 4930 free_netdev(netdev);
6b7c5b94
SP
4931rel_reg:
4932 pci_release_regions(pdev);
4933disable_dev:
4934 pci_disable_device(pdev);
4935do_none:
c4ca2374 4936 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4937 return status;
4938}
4939
4940static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4941{
4942 struct be_adapter *adapter = pci_get_drvdata(pdev);
4943 struct net_device *netdev = adapter->netdev;
4944
76a9e08e 4945 if (adapter->wol_en)
71d8d1b5
AK
4946 be_setup_wol(adapter, true);
4947
d4360d6f 4948 be_intr_set(adapter, false);
f67ef7ba
PR
4949 cancel_delayed_work_sync(&adapter->func_recovery_work);
4950
6b7c5b94
SP
4951 netif_device_detach(netdev);
4952 if (netif_running(netdev)) {
4953 rtnl_lock();
4954 be_close(netdev);
4955 rtnl_unlock();
4956 }
9b0365f1 4957 be_clear(adapter);
6b7c5b94
SP
4958
4959 pci_save_state(pdev);
4960 pci_disable_device(pdev);
4961 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4962 return 0;
4963}
4964
4965static int be_resume(struct pci_dev *pdev)
4966{
4967 int status = 0;
4968 struct be_adapter *adapter = pci_get_drvdata(pdev);
4969 struct net_device *netdev = adapter->netdev;
4970
4971 netif_device_detach(netdev);
4972
4973 status = pci_enable_device(pdev);
4974 if (status)
4975 return status;
4976
1ca01512 4977 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4978 pci_restore_state(pdev);
4979
dd5746bf
SB
4980 status = be_fw_wait_ready(adapter);
4981 if (status)
4982 return status;
4983
d4360d6f 4984 be_intr_set(adapter, true);
2243e2e9
SP
4985 /* tell fw we're ready to fire cmds */
4986 status = be_cmd_fw_init(adapter);
4987 if (status)
4988 return status;
4989
9b0365f1 4990 be_setup(adapter);
6b7c5b94
SP
4991 if (netif_running(netdev)) {
4992 rtnl_lock();
4993 be_open(netdev);
4994 rtnl_unlock();
4995 }
f67ef7ba
PR
4996
4997 schedule_delayed_work(&adapter->func_recovery_work,
4998 msecs_to_jiffies(1000));
6b7c5b94 4999 netif_device_attach(netdev);
71d8d1b5 5000
76a9e08e 5001 if (adapter->wol_en)
71d8d1b5 5002 be_setup_wol(adapter, false);
a4ca055f 5003
6b7c5b94
SP
5004 return 0;
5005}
5006
82456b03
SP
5007/*
5008 * An FLR will stop BE from DMAing any data.
5009 */
5010static void be_shutdown(struct pci_dev *pdev)
5011{
5012 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5013
2d5d4154
AK
5014 if (!adapter)
5015 return;
82456b03 5016
d114f99a 5017 be_roce_dev_shutdown(adapter);
0f4a6828 5018 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5019 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5020
2d5d4154 5021 netif_device_detach(adapter->netdev);
82456b03 5022
57841869
AK
5023 be_cmd_reset_function(adapter);
5024
82456b03 5025 pci_disable_device(pdev);
82456b03
SP
5026}
5027
cf588477 5028static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5029 pci_channel_state_t state)
cf588477
SP
5030{
5031 struct be_adapter *adapter = pci_get_drvdata(pdev);
5032 struct net_device *netdev = adapter->netdev;
5033
5034 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5035
01e5b2c4
SK
5036 if (!adapter->eeh_error) {
5037 adapter->eeh_error = true;
cf588477 5038
01e5b2c4 5039 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5040
cf588477 5041 rtnl_lock();
01e5b2c4
SK
5042 netif_device_detach(netdev);
5043 if (netif_running(netdev))
5044 be_close(netdev);
cf588477 5045 rtnl_unlock();
01e5b2c4
SK
5046
5047 be_clear(adapter);
cf588477 5048 }
cf588477
SP
5049
5050 if (state == pci_channel_io_perm_failure)
5051 return PCI_ERS_RESULT_DISCONNECT;
5052
5053 pci_disable_device(pdev);
5054
eeb7fc7b
SK
5055 /* The error could cause the FW to trigger a flash debug dump.
5056 * Resetting the card while flash dump is in progress
c8a54163
PR
5057 * can cause it not to recover; wait for it to finish.
5058 * Wait only for first function as it is needed only once per
5059 * adapter.
eeb7fc7b 5060 */
c8a54163
PR
5061 if (pdev->devfn == 0)
5062 ssleep(30);
5063
cf588477
SP
5064 return PCI_ERS_RESULT_NEED_RESET;
5065}
5066
5067static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5068{
5069 struct be_adapter *adapter = pci_get_drvdata(pdev);
5070 int status;
5071
5072 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5073
5074 status = pci_enable_device(pdev);
5075 if (status)
5076 return PCI_ERS_RESULT_DISCONNECT;
5077
5078 pci_set_master(pdev);
1ca01512 5079 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5080 pci_restore_state(pdev);
5081
5082 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5083 dev_info(&adapter->pdev->dev,
5084 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5085 status = be_fw_wait_ready(adapter);
cf588477
SP
5086 if (status)
5087 return PCI_ERS_RESULT_DISCONNECT;
5088
d6b6d987 5089 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5090 be_clear_all_error(adapter);
cf588477
SP
5091 return PCI_ERS_RESULT_RECOVERED;
5092}
5093
5094static void be_eeh_resume(struct pci_dev *pdev)
5095{
5096 int status = 0;
5097 struct be_adapter *adapter = pci_get_drvdata(pdev);
5098 struct net_device *netdev = adapter->netdev;
5099
5100 dev_info(&adapter->pdev->dev, "EEH resume\n");
5101
5102 pci_save_state(pdev);
5103
2d177be8 5104 status = be_cmd_reset_function(adapter);
cf588477
SP
5105 if (status)
5106 goto err;
5107
03a58baa
KA
5108 /* On some BE3 FW versions, after a HW reset,
5109 * interrupts will remain disabled for each function.
5110 * So, explicitly enable interrupts
5111 */
5112 be_intr_set(adapter, true);
5113
2d177be8
KA
5114 /* tell fw we're ready to fire cmds */
5115 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5116 if (status)
5117 goto err;
5118
cf588477
SP
5119 status = be_setup(adapter);
5120 if (status)
5121 goto err;
5122
5123 if (netif_running(netdev)) {
5124 status = be_open(netdev);
5125 if (status)
5126 goto err;
5127 }
f67ef7ba
PR
5128
5129 schedule_delayed_work(&adapter->func_recovery_work,
5130 msecs_to_jiffies(1000));
cf588477
SP
5131 netif_device_attach(netdev);
5132 return;
5133err:
5134 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5135}
5136
3646f0e5 5137static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5138 .error_detected = be_eeh_err_detected,
5139 .slot_reset = be_eeh_reset,
5140 .resume = be_eeh_resume,
5141};
5142
6b7c5b94
SP
5143static struct pci_driver be_driver = {
5144 .name = DRV_NAME,
5145 .id_table = be_dev_ids,
5146 .probe = be_probe,
5147 .remove = be_remove,
5148 .suspend = be_suspend,
cf588477 5149 .resume = be_resume,
82456b03 5150 .shutdown = be_shutdown,
cf588477 5151 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5152};
5153
5154static int __init be_init_module(void)
5155{
8e95a202
JP
5156 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5157 rx_frag_size != 2048) {
6b7c5b94
SP
5158 printk(KERN_WARNING DRV_NAME
5159 " : Module param rx_frag_size must be 2048/4096/8192."
5160 " Using 2048\n");
5161 rx_frag_size = 2048;
5162 }
6b7c5b94
SP
5163
5164 return pci_register_driver(&be_driver);
5165}
5166module_init(be_init_module);
5167
5168static void __exit be_exit_module(void)
5169{
5170 pci_unregister_driver(&be_driver);
5171}
5172module_exit(be_exit_module);