/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

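/* Queue ring helpers: be_queue_alloc() backs a queue with len * entry_size
 * bytes of zeroed DMA-coherent memory (so the HW never sees stale
 * descriptors); be_queue_free() releases it and clears the VA so a repeated
 * free is harmless.
 */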
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

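/* Host-interrupt control. be_intr_set() prefers the FW command
 * (be_cmd_intr_set) and falls back to be_reg_intr_set(), which toggles
 * the HOSTINTR bit of the MEMBAR interrupt-control register through
 * PCI config space.
 */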
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

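/* Doorbell helpers: each notify routine packs the ring id, the count of
 * posted/popped entries and the control flags into a single 32-bit value
 * and writes it to the doorbell BAR (adapter->db). The wmb() before the
 * RQ/TXQ writes orders the descriptor updates ahead of the doorbell.
 */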
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

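/* The populate_be_vN_stats() routines copy the port, RxF and PMEM counters
 * from the version-specific GET_STATS response (converted to CPU
 * endianness first) into the common adapter->drv_stats block.
 */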
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

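/* Accumulate a 16-bit HW counter into a 32-bit SW counter across wraps:
 * e.g. if the low half of *acc is 0xFFF0 and the HW now reports 0x0005,
 * the counter wrapped once, so 65536 is added on top of the new value.
 */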
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

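/* ndo_get_stats64: sums the per-queue SW counters under their u64_stats
 * seqcounts (so 64-bit reads are consistent on 32-bit hosts) and derives
 * the aggregate rx_errors/rx_fifo_errors from the parsed HW counters.
 */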
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

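/* Derive the TX WRB header parameters (LSO/LSO6, IP/TCP/UDP checksum
 * offload, VLAN tag) from the skb. For encapsulated (tunnelled) packets
 * the inner IP protocol selects TCP vs UDP checksum offload.
 */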
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

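/* QnQ/pvid software VLAN insertion: when the FW asks the driver to skip
 * HW tagging, the tag (and the outer qnq_vid, when configured) is inlined
 * into the packet itself and VLAN_SKIP_HW is flagged in the WRB params.
 */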
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

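/* Decide whether this TX packet should also be relayed to the BMC
 * (OS2BMC). Only multicast (incl. broadcast) destinations qualify;
 * the per-type BMC filter mask then selects ARP, DHCP, NetBIOS and
 * IPv6 RA/NA/RAS traffic.
 */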
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

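/* RX filter helpers: these routines program the promiscuous and multicast
 * filter modes through the RX_FILTER FW command (unicast entries are
 * managed with PMAC add/del) and mirror the resulting state in
 * adapter->if_flags.
 */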
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

ba343c77
SB
1567static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1568{
1569 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1570 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1571 int status;
1572
11ac75ed 1573 if (!sriov_enabled(adapter))
ba343c77
SB
1574 return -EPERM;
1575
11ac75ed 1576 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1577 return -EINVAL;
1578
3c31aaf3
VV
1579 /* Proceed further only if user provided MAC is different
1580 * from active MAC
1581 */
1582 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1583 return 0;
1584
3175d8c2
SP
1585 if (BEx_chip(adapter)) {
1586 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1587 vf + 1);
ba343c77 1588
11ac75ed
SP
1589 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1590 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1591 } else {
1592 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1593 vf + 1);
590c391d
PR
1594 }
1595
abccf23e
KA
1596 if (status) {
1597 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1598 mac, vf, status);
1599 return be_cmd_status(status);
1600 }
64600ea5 1601
abccf23e
KA
1602 ether_addr_copy(vf_cfg->mac_addr, mac);
1603
1604 return 0;
ba343c77
SB
1605}
1606
64600ea5 1607static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1608 struct ifla_vf_info *vi)
64600ea5
AK
1609{
1610 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1611 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1612
11ac75ed 1613 if (!sriov_enabled(adapter))
64600ea5
AK
1614 return -EPERM;
1615
11ac75ed 1616 if (vf >= adapter->num_vfs)
64600ea5
AK
1617 return -EINVAL;
1618
1619 vi->vf = vf;
ed616689
SC
1620 vi->max_tx_rate = vf_cfg->tx_rate;
1621 vi->min_tx_rate = 0;
a60b3a13
AK
1622 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1623 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1624 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1625 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1626 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1627
1628 return 0;
1629}
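
/* Editor's note (illustrative, not in the original source): be_get_vf_config()
 * above is wired up as the ndo_get_vf_config hook, which rtnetlink calls to
 * fill the per-VF attributes (MAC, VLAN/QoS, rate limits, link state,
 * spoofchk) reported by, e.g., "ip link show". All values come straight from
 * the driver's cached vf_cfg state rather than from a fresh FW query.
 */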

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
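
/* Editor's note (illustrative, not in the original source): with
 * VLAN_PRIO_SHIFT == 13, a request such as
 *	ip link set <dev> vf 0 vlan 100 qos 3
 * arrives here as vlan=100, qos=3 and is folded into a single TCI-style
 * value: 100 | (3 << 13) == 0x6064, which be_set_vf_tvt() then programs
 * into the switch as the transparent tag.
 */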

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
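
/* Editor's note (illustrative, not in the original source): on a Skyhawk
 * port linked at 10000 Mbps, percent_rate == 100, so max_tx_rate == 2500
 * (25% of line rate) is accepted while 2550 is rejected for not being a
 * whole percent of the link speed.
 */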

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
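
/* Editor's note (illustrative, not in the original source): if an EQ saw
 * 200,000 rx and 100,000 tx packets over a 1000 ms window, pps == 300,000
 * and the raw delay is (300000 / 15000) << 2 == 80. A nearly idle EQ with
 * pps below 30,000 yields eqd < 8, which is clamped to 0, i.e. interrupt
 * moderation is effectively disabled.
 */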

/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (time_before_eq(now, aic->jiffies) ||
	    jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
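
/* Editor's note (illustrative, not in the original source): firmware takes
 * the EQ delay as a multiplier rather than in microseconds, so an eqd of 80
 * is programmed as (80 * 65) / 100 == 52 above; on Skyhawk-R the same eqd
 * maps to R2I_DLY_ENC_2 in be_get_eq_delay_mult_enc() (60 < 80 <= 100).
 */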

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
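
/* Editor's note (illustrative, not in the original source): with the default
 * rx_frag_size of 2048 on a 4K-page system, big_page_size works out to
 * (1 << get_order(2048)) * PAGE_SIZE == 4096, get_order(4096) == 0 and each
 * plain page yields two 2K fragments. A larger rx_frag_size of 8192 makes
 * big_page_size 8192, and the order-1 allocation is then marked __GFP_COMP.
 */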

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
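
/* Editor's note (illustrative, not in the original source): the ERX doorbell
 * reports at most MAX_NUM_POST_ERX_DB new buffers per write (255 in this
 * driver), so replenishing 512 fragments rings the doorbell three times:
 * 255 + 255 + 2.
 */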

static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter, BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
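
/* Editor's note (illustrative, not in the original source): with 4 event
 * queues, num_rss_qs == 4; if a default (non-RSS) RXQ is also needed,
 * num_rx_qs becomes 5 and the fifth CQ wraps back onto EQ0 via the
 * i % num_evt_qs rotation above.
 */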

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}

static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
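
/* Editor's note (illustrative, not in the original source): on an 8-core
 * host where be_max_eqs() is 4 and RoCE is supported, the request is
 * min(2 * 4, 2 * 8) == 8 vectors; if the PCI core grants all 8, half are
 * set aside for RoCE and num_msix_vec becomes 4 for NIC use.
 */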
3277
fe6d2a38 3278static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3279 struct be_eq_obj *eqo)
b628bde2 3280{
f2f781a7 3281 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3282}
6b7c5b94 3283
b628bde2
SP
3284static int be_msix_register(struct be_adapter *adapter)
3285{
10ef9ab4
SP
3286 struct net_device *netdev = adapter->netdev;
3287 struct be_eq_obj *eqo;
3288 int status, i, vec;
6b7c5b94 3289
10ef9ab4
SP
3290 for_all_evt_queues(adapter, eqo, i) {
3291 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3292 vec = be_msix_vec_get(adapter, eqo);
3293 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3294 if (status)
3295 goto err_msix;
3296
3297 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3298 }
b628bde2 3299
6b7c5b94 3300 return 0;
3abcdeda 3301err_msix:
3302 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3303 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3304 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3305 status);
ac6a0c4a 3306 be_msix_disable(adapter);
3307 return status;
3308}
3309
3310static int be_irq_register(struct be_adapter *adapter)
3311{
3312 struct net_device *netdev = adapter->netdev;
3313 int status;
3314
ac6a0c4a 3315 if (msix_enabled(adapter)) {
3316 status = be_msix_register(adapter);
3317 if (status == 0)
3318 goto done;
ba343c77 3319 /* INTx is not supported for VF */
18c57c74 3320 if (be_virtfn(adapter))
ba343c77 3321 return status;
3322 }
3323
e49cc34f 3324 /* INTx: only the first EQ is used */
3325 netdev->irq = adapter->pdev->irq;
3326 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3327 &adapter->eq_obj[0]);
3328 if (status) {
3329 dev_err(&adapter->pdev->dev,
3330 "INTx request IRQ failed - err %d\n", status);
3331 return status;
3332 }
3333done:
3334 adapter->isr_registered = true;
3335 return 0;
3336}
3337
3338static void be_irq_unregister(struct be_adapter *adapter)
3339{
3340 struct net_device *netdev = adapter->netdev;
10ef9ab4 3341 struct be_eq_obj *eqo;
d658d98a 3342 int i, vec;
3343
3344 if (!adapter->isr_registered)
3345 return;
3346
3347 /* INTx */
ac6a0c4a 3348 if (!msix_enabled(adapter)) {
e49cc34f 3349 free_irq(netdev->irq, &adapter->eq_obj[0]);
3350 goto done;
3351 }
3352
3353 /* MSIx */
3354 for_all_evt_queues(adapter, eqo, i) {
3355 vec = be_msix_vec_get(adapter, eqo);
3356 irq_set_affinity_hint(vec, NULL);
3357 free_irq(vec, eqo);
3358 }
3abcdeda 3359
3360done:
3361 adapter->isr_registered = false;
3362}
3363
10ef9ab4 3364static void be_rx_qs_destroy(struct be_adapter *adapter)
3365{
3366 struct be_queue_info *q;
3367 struct be_rx_obj *rxo;
3368 int i;
3369
3370 for_all_rx_queues(adapter, rxo, i) {
3371 q = &rxo->q;
3372 if (q->created) {
3373 /* If RXQs are destroyed while in an "out of buffer"
3374 * state, there is a possibility of an HW stall on
3375 * Lancer. So, post 64 buffers to each queue to relieve
3376 * the "out of buffer" condition.
3377 * Make sure there's space in the RXQ before posting.
3378 */
3379 if (lancer_chip(adapter)) {
3380 be_rx_cq_clean(rxo);
3381 if (atomic_read(&q->used) == 0)
3382 be_post_rx_frags(rxo, GFP_KERNEL,
3383 MAX_RX_POST);
3384 }
3385
482c9e79 3386 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3387 be_rx_cq_clean(rxo);
99b44304 3388 be_rxq_clean(rxo);
482c9e79 3389 }
10ef9ab4 3390 be_queue_free(adapter, q);
3391 }
3392}
3393
3394static void be_disable_if_filters(struct be_adapter *adapter)
3395{
3396 be_cmd_pmac_del(adapter, adapter->if_handle,
3397 adapter->pmac_id[0], 0);
3398
3399 be_clear_uc_list(adapter);
3400
3401 /* The IFACE flags are enabled in the open path and cleared
3402 * in the close path. When a VF gets detached from the host and
3403 * assigned to a VM the following happens:
3404 * - VF's IFACE flags get cleared in the detach path
3405 * - IFACE create is issued by the VF in the attach path
3406 * Due to a bug in the BE3/Skyhawk-R FW
3407 * (Lancer FW doesn't have the bug), the IFACE capability flags
3408 * specified along with the IFACE create cmd issued by a VF are not
3409 * honoured by FW. As a consequence, if a *new* driver
3410 * (that enables/disables IFACE flags in open/close)
3411 * is loaded in the host and an *old* driver is used by a VM/VF,
3412 * the IFACE gets created *without* the needed flags.
3413 * To avoid this, disable RX-filter flags only for Lancer.
3414 */
3415 if (lancer_chip(adapter)) {
3416 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3417 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3418 }
3419}
3420
3421static int be_close(struct net_device *netdev)
3422{
3423 struct be_adapter *adapter = netdev_priv(netdev);
3424 struct be_eq_obj *eqo;
3425 int i;
889cd4b2 3426
3427 /* This protection is needed as be_close() may be called even when the
3428 * adapter is in cleared state (after eeh perm failure)
3429 */
3430 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3431 return 0;
3432
3433 be_disable_if_filters(adapter);
3434
3435 be_roce_dev_close(adapter);
3436
3437 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3438 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3439 napi_disable(&eqo->napi);
3440 be_disable_busy_poll(eqo);
3441 }
71237b6f 3442 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3443 }
3444
3445 be_async_mcc_disable(adapter);
3446
3447 /* Wait for all pending tx completions to arrive so that
3448 * all tx skbs are freed.
3449 */
fba87559 3450 netif_tx_disable(netdev);
6e1f9975 3451 be_tx_compl_clean(adapter);
3452
3453 be_rx_qs_destroy(adapter);
d11a347d 3454
a323d9bf 3455 for_all_evt_queues(adapter, eqo, i) {
3456 if (msix_enabled(adapter))
3457 synchronize_irq(be_msix_vec_get(adapter, eqo));
3458 else
3459 synchronize_irq(netdev->irq);
3460 be_eq_clean(eqo);
3461 }
3462
3463 be_irq_unregister(adapter);
3464
3465 return 0;
3466}
3467
10ef9ab4 3468static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3469{
3470 struct rss_info *rss = &adapter->rss_info;
3471 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3472 struct be_rx_obj *rxo;
e9008ee9 3473 int rc, i, j;
3474
3475 for_all_rx_queues(adapter, rxo, i) {
3476 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3477 sizeof(struct be_eth_rx_d));
3478 if (rc)
3479 return rc;
3480 }
3481
3482 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3483 rxo = default_rxo(adapter);
3484 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3485 rx_frag_size, adapter->if_handle,
3486 false, &rxo->rss_id);
3487 if (rc)
3488 return rc;
3489 }
3490
3491 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3492 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3493 rx_frag_size, adapter->if_handle,
3494 true, &rxo->rss_id);
3495 if (rc)
3496 return rc;
3497 }
3498
3499 if (be_multi_rxq(adapter)) {
71bb8bd0 3500 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3501 for_all_rss_queues(adapter, rxo, i) {
e2557877 3502 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3503 break;
3504 rss->rsstable[j + i] = rxo->rss_id;
3505 rss->rss_queue[j + i] = i;
3506 }
3507 }
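 /* Illustration (hypothetical): with 4 RSS queues, the loops above
  * fill rsstable[] with the repeating pattern rss_id(q0), rss_id(q1),
  * rss_id(q2), rss_id(q3), ... for all RSS_INDIR_TABLE_LEN entries,
  * spreading RX flows evenly across the queues.
  */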
3508 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3509 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3510
3511 if (!BEx_chip(adapter))
3512 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3513 RSS_ENABLE_UDP_IPV6;
3514 } else {
3515 /* Disable RSS, if only default RX Q is created */
e2557877 3516 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3517 }
594ad54a 3518
1dcf7b1c 3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
d5d30981 3521 RSS_INDIR_TABLE_LEN, rss_key);
da1388d6 3522 if (rc) {
e2557877 3523 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3524 return rc;
3525 }
3526
1dcf7b1c 3527 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3528
3529 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3530 * which is a queue empty condition
3531 */
10ef9ab4 3532 for_all_rx_queues(adapter, rxo, i)
3533 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
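 /* E.g. for a ring of N entries at most N - 1 buffers are posted:
  * head == tail must unambiguously mean "empty", so a completely
  * full ring would otherwise be indistinguishable from an empty one.
  */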
3534
3535 return 0;
3536}
3537
3538static int be_enable_if_filters(struct be_adapter *adapter)
3539{
3540 int status;
3541
3542 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3543 if (status)
3544 return status;
3545
3546 /* For BE3 VFs, the PF programs the initial MAC address */
3547 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3548 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3549 adapter->if_handle,
3550 &adapter->pmac_id[0], 0);
3551 if (status)
3552 return status;
3553 }
3554
3555 if (adapter->vlans_added)
3556 be_vid_config(adapter);
3557
3558 be_set_rx_mode(adapter->netdev);
3559
3560 return 0;
3561}
3562
3563static int be_open(struct net_device *netdev)
3564{
3565 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3566 struct be_eq_obj *eqo;
3abcdeda 3567 struct be_rx_obj *rxo;
10ef9ab4 3568 struct be_tx_obj *txo;
b236916a 3569 u8 link_status;
3abcdeda 3570 int status, i;
5fb379ee 3571
10ef9ab4 3572 status = be_rx_qs_create(adapter);
3573 if (status)
3574 goto err;
3575
3576 status = be_enable_if_filters(adapter);
3577 if (status)
3578 goto err;
3579
3580 status = be_irq_register(adapter);
3581 if (status)
3582 goto err;
5fb379ee 3583
10ef9ab4 3584 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3585 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3586
3587 for_all_tx_queues(adapter, txo, i)
3588 be_cq_notify(adapter, txo->cq.id, true, 0);
3589
3590 be_async_mcc_enable(adapter);
3591
3592 for_all_evt_queues(adapter, eqo, i) {
3593 napi_enable(&eqo->napi);
6384a4d0 3594 be_enable_busy_poll(eqo);
20947770 3595 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3596 }
04d3d624 3597 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3598
323ff71e 3599 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3600 if (!status)
3601 be_link_status_update(adapter, link_status);
3602
fba87559 3603 netif_tx_start_all_queues(netdev);
045508a8 3604 be_roce_dev_open(adapter);
c9c47142 3605
c5abe7c0 3606#ifdef CONFIG_BE2NET_VXLAN
3607 if (skyhawk_chip(adapter))
3608 vxlan_get_rx_port(netdev);
3609#endif
3610
3611 return 0;
3612err:
3613 be_close(adapter->netdev);
3614 return -EIO;
3615}
3616
3617static int be_setup_wol(struct be_adapter *adapter, bool enable)
3618{
145155e7 3619 struct device *dev = &adapter->pdev->dev;
71d8d1b5 3620 struct be_dma_mem cmd;
71d8d1b5 3621 u8 mac[ETH_ALEN];
145155e7 3622 int status;
71d8d1b5 3623
c7bf7169 3624 eth_zero_addr(mac);
3625
3626 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
145155e7 3627 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
ddf1169f 3628 if (!cmd.va)
6b568689 3629 return -ENOMEM;
3630
3631 if (enable) {
3632 status = pci_write_config_dword(adapter->pdev,
3633 PCICFG_PM_CONTROL_OFFSET,
3634 PCICFG_PM_CONTROL_MASK);
71d8d1b5 3635 if (status) {
3636 dev_err(dev, "Could not enable Wake-on-lan\n");
3637 goto err;
71d8d1b5 3638 }
71d8d1b5 3639 } else {
145155e7 3640 ether_addr_copy(mac, adapter->netdev->dev_addr);
3641 }
3642
3643 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3644 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3645 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3646err:
3647 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
3648 return status;
3649}
3650
3651static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3652{
3653 u32 addr;
3654
3655 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3656
3657 mac[5] = (u8)(addr & 0xFF);
3658 mac[4] = (u8)((addr >> 8) & 0xFF);
3659 mac[3] = (u8)((addr >> 16) & 0xFF);
3660 /* Use the OUI from the current MAC address */
3661 memcpy(mac, adapter->netdev->dev_addr, 3);
3662}
3663
3664/*
3665 * Generate a seed MAC address from the PF MAC Address using jhash.
3666 * MAC addresses for VFs are assigned incrementally starting from the seed.
3667 * These addresses are programmed in the ASIC by the PF and the VF driver
3668 * queries for the MAC address during its probe.
3669 */
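/* Illustration with hypothetical values: for a PF MAC of
 * 00:90:fa:12:34:56, a jhash() result of 0xa1b2c3d4 yields the seed
 * 00:90:fa:b2:c3:d4 (OUI kept, low 3 hash bytes in bytes 3..5).
 * The first VF then gets ...:d4, the next ...:d5, and so on, as
 * mac[5] is incremented per VF below.
 */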
4c876616 3670static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3671{
f9449ab7 3672 u32 vf;
3abcdeda 3673 int status = 0;
6d87f5c3 3674 u8 mac[ETH_ALEN];
11ac75ed 3675 struct be_vf_cfg *vf_cfg;
3676
3677 be_vf_eth_addr_generate(adapter, mac);
3678
11ac75ed 3679 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3680 if (BEx_chip(adapter))
590c391d 3681 status = be_cmd_pmac_add(adapter, mac,
3682 vf_cfg->if_handle,
3683 &vf_cfg->pmac_id, vf + 1);
3684 else
3685 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3686 vf + 1);
590c391d 3687
3688 if (status)
3689 dev_err(&adapter->pdev->dev,
3690 "Mac address assignment failed for VF %d\n",
3691 vf);
6d87f5c3 3692 else
11ac75ed 3693 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3694
3695 mac[5] += 1;
3696 }
3697 return status;
3698}
3699
3700static int be_vfs_mac_query(struct be_adapter *adapter)
3701{
3702 int status, vf;
3703 u8 mac[ETH_ALEN];
3704 struct be_vf_cfg *vf_cfg;
3705
3706 for_all_vfs(adapter, vf_cfg, vf) {
3707 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3708 mac, vf_cfg->if_handle,
3709 false, vf+1);
3710 if (status)
3711 return status;
3712 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3713 }
3714 return 0;
3715}
3716
f9449ab7 3717static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3718{
11ac75ed 3719 struct be_vf_cfg *vf_cfg;
3720 u32 vf;
3721
257a3feb 3722 if (pci_vfs_assigned(adapter->pdev)) {
3723 dev_warn(&adapter->pdev->dev,
3724 "VFs are assigned to VMs: not disabling VFs\n");
3725 goto done;
3726 }
3727
3728 pci_disable_sriov(adapter->pdev);
3729
11ac75ed 3730 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3731 if (BEx_chip(adapter))
3732 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3733 vf_cfg->pmac_id, vf + 1);
3734 else
3735 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3736 vf + 1);
f9449ab7 3737
3738 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3739 }
3740done:
3741 kfree(adapter->vf_cfg);
3742 adapter->num_vfs = 0;
f174c7ec 3743 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3744}
3745
3746static void be_clear_queues(struct be_adapter *adapter)
3747{
3748 be_mcc_queues_destroy(adapter);
3749 be_rx_cqs_destroy(adapter);
3750 be_tx_queues_destroy(adapter);
3751 be_evt_queues_destroy(adapter);
3752}
3753
68d7bdcb 3754static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3755{
3756 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3757 cancel_delayed_work_sync(&adapter->work);
3758 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3759 }
3760}
3761
3762static void be_cancel_err_detection(struct be_adapter *adapter)
3763{
3764 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3766 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3767 }
3768}
3769
c5abe7c0 3770#ifdef CONFIG_BE2NET_VXLAN
3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3772{
3773 struct net_device *netdev = adapter->netdev;
3774
3775 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3776 be_cmd_manage_iface(adapter, adapter->if_handle,
3777 OP_CONVERT_TUNNEL_TO_NORMAL);
3778
3779 if (adapter->vxlan_port)
3780 be_cmd_set_vxlan_port(adapter, 0);
3781
3782 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3783 adapter->vxlan_port = 0;
3784
3785 netdev->hw_enc_features = 0;
3786 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3787 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3788}
c5abe7c0 3789#endif
c9c47142 3790
3791static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3792{
3793 struct be_resources res = adapter->pool_res;
3794 u16 num_vf_qs = 1;
3795
3796 /* Distribute the queue resources equally among the PF and its VFs.
3797 * Do not distribute queue resources in multi-channel configuration.
3798 */
3799 if (num_vfs && !be_is_mc(adapter)) {
3800 /* If the number of VFs requested is at least 8 below the max
3801 * supported, reserve 8 queue pairs for the PF and divide the
3802 * remaining resources evenly among the VFs
3803 */
3804 if (num_vfs < (be_max_vfs(adapter) - 8))
3805 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3806 else
3807 num_vf_qs = res.max_rss_qs / num_vfs;
3808
3809 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3810 * interfaces per port. Provide RSS on VFs, only if number
3811 * of VFs requested is less than MAX_RSS_IFACES limit.
3812 */
3813 if (num_vfs >= MAX_RSS_IFACES)
3814 num_vf_qs = 1;
3815 }
3816 return num_vf_qs;
3817}
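/* Worked example (hypothetical limits): with res.max_rss_qs = 40,
 * be_max_vfs() = 32 and num_vfs = 16, 16 < (32 - 8) holds, so the PF
 * keeps 8 queue pairs and each VF gets (40 - 8) / 16 = 2. Once
 * num_vfs reaches MAX_RSS_IFACES, every VF falls back to 1 queue pair.
 */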
3818
3819static int be_clear(struct be_adapter *adapter)
3820{
3821 struct pci_dev *pdev = adapter->pdev;
3822 u16 num_vf_qs;
3823
68d7bdcb 3824 be_cancel_worker(adapter);
191eb756 3825
11ac75ed 3826 if (sriov_enabled(adapter))
3827 be_vf_clear(adapter);
3828
3829 /* Re-configure FW to distribute resources evenly across max-supported
3830 * number of VFs, only when VFs are not already enabled.
3831 */
3832 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3833 !pci_vfs_assigned(pdev)) {
3834 num_vf_qs = be_calculate_vf_qs(adapter,
3835 pci_sriov_get_totalvfs(pdev));
bec84e6b 3836 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3837 pci_sriov_get_totalvfs(pdev),
3838 num_vf_qs);
3839 }
bec84e6b 3840
c5abe7c0 3841#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3842 be_disable_vxlan_offloads(adapter);
c5abe7c0 3843#endif
3844 kfree(adapter->pmac_id);
3845 adapter->pmac_id = NULL;
fbc13f01 3846
f9449ab7 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3848
7707133c 3849 be_clear_queues(adapter);
a54769f5 3850
10ef9ab4 3851 be_msix_disable(adapter);
e1ad8e33 3852 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3853 return 0;
3854}
3855
4c876616 3856static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3857{
92bf14ab 3858 struct be_resources res = {0};
bcc84140 3859 u32 cap_flags, en_flags, vf;
4c876616 3860 struct be_vf_cfg *vf_cfg;
0700d816 3861 int status;
abb93951 3862
0700d816 3863 /* If a FW profile exists, then cap_flags are updated */
4c876616 3864 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3865 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3866
4c876616 3867 for_all_vfs(adapter, vf_cfg, vf) {
3868 if (!BE3_chip(adapter)) {
3869 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3870 RESOURCE_LIMITS,
92bf14ab 3871 vf + 1);
435452aa 3872 if (!status) {
92bf14ab 3873 cap_flags = res.if_cap_flags;
3874 /* Prevent VFs from enabling VLAN promiscuous
3875 * mode
3876 */
3877 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3878 }
92bf14ab 3879 }
4c876616 3880
3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
4c876616 3887 if (status)
0700d816 3888 return status;
4c876616 3889 }
3890
3891 return 0;
3892}
3893
39f1d94d 3894static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3895{
11ac75ed 3896 struct be_vf_cfg *vf_cfg;
3897 int vf;
3898
3899 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3900 GFP_KERNEL);
3901 if (!adapter->vf_cfg)
3902 return -ENOMEM;
3903
3904 for_all_vfs(adapter, vf_cfg, vf) {
3905 vf_cfg->if_handle = -1;
3906 vf_cfg->pmac_id = -1;
30128031 3907 }
39f1d94d 3908 return 0;
3909}
3910
3911static int be_vf_setup(struct be_adapter *adapter)
3912{
c502224e 3913 struct device *dev = &adapter->pdev->dev;
11ac75ed 3914 struct be_vf_cfg *vf_cfg;
4c876616 3915 int status, old_vfs, vf;
e7bcbd7b 3916 bool spoofchk;
39f1d94d 3917
257a3feb 3918 old_vfs = pci_num_vf(adapter->pdev);
3919
3920 status = be_vf_setup_init(adapter);
3921 if (status)
3922 goto err;
30128031 3923
3924 if (old_vfs) {
3925 for_all_vfs(adapter, vf_cfg, vf) {
3926 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3927 if (status)
3928 goto err;
3929 }
f9449ab7 3930
3931 status = be_vfs_mac_query(adapter);
3932 if (status)
3933 goto err;
3934 } else {
3935 status = be_vfs_if_create(adapter);
3936 if (status)
3937 goto err;
3938
3939 status = be_vf_eth_addr_config(adapter);
3940 if (status)
3941 goto err;
3942 }
f9449ab7 3943
11ac75ed 3944 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3945 /* Allow VFs to program MAC/VLAN filters */
3946 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3947 vf + 1);
3948 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3949 status = be_cmd_set_fn_privileges(adapter,
435452aa 3950 vf_cfg->privileges |
3951 BE_PRIV_FILTMGMT,
3952 vf + 1);
3953 if (!status) {
3954 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3955 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3956 vf);
435452aa 3957 }
3958 }
3959
3960 /* Allow full available bandwidth */
3961 if (!old_vfs)
3962 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3963
3964 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3965 vf_cfg->if_handle, NULL,
3966 &spoofchk);
3967 if (!status)
3968 vf_cfg->spoofchk = spoofchk;
3969
bdce2ad7 3970 if (!old_vfs) {
0599863d 3971 be_cmd_enable_vf(adapter, vf + 1);
3972 be_cmd_set_logical_link_config(adapter,
3973 IFLA_VF_LINK_STATE_AUTO,
3974 vf+1);
3975 }
f9449ab7 3976 }
3977
3978 if (!old_vfs) {
3979 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3980 if (status) {
3981 dev_err(dev, "SRIOV enable failed\n");
3982 adapter->num_vfs = 0;
3983 goto err;
3984 }
3985 }
3986
3987 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3988 return 0;
3989err:
3990 dev_err(dev, "VF setup failed\n");
3991 be_vf_clear(adapter);
3992 return status;
3993}
3994
3995/* Converting function_mode bits on BE3 to SH mc_type enums */
3996
3997static u8 be_convert_mc_type(u32 function_mode)
3998{
66064dbc 3999 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4000 return vNIC1;
66064dbc 4001 else if (function_mode & QNQ_MODE)
4002 return FLEX10;
4003 else if (function_mode & VNIC_MODE)
4004 return vNIC2;
4005 else if (function_mode & UMC_ENABLED)
4006 return UMC;
4007 else
4008 return MC_NONE;
4009}
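/* Summary of the mapping above:
 *   VNIC_MODE + QNQ_MODE -> vNIC1
 *   QNQ_MODE only        -> FLEX10
 *   VNIC_MODE only       -> vNIC2
 *   UMC_ENABLED          -> UMC
 *   none of the above    -> MC_NONE (not multi-channel)
 */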
4010
4011/* On BE2/BE3 the FW does not report the supported resource limits */
4012static void BEx_get_resources(struct be_adapter *adapter,
4013 struct be_resources *res)
4014{
bec84e6b 4015 bool use_sriov = adapter->num_vfs ? 1 : 0;
4016
4017 if (be_physfn(adapter))
4018 res->max_uc_mac = BE_UC_PMAC_COUNT;
4019 else
4020 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4021
4022 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4023
4024 if (be_is_mc(adapter)) {
4025 /* Assuming that there are 4 channels per port,
4026 * when multi-channel is enabled
4027 */
4028 if (be_is_qnq_mode(adapter))
4029 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4030 else
4031 /* In a non-qnq multichannel mode, the pvid
4032 * takes up one vlan entry
4033 */
4034 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4035 } else {
92bf14ab 4036 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4037 }
4038
4039 res->max_mcast_mac = BE_MAX_MC;
4040
4041 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4042 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4043 * *only* if it is RSS-capable.
4044 */
4045 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4046 be_virtfn(adapter) ||
4047 (be_is_mc(adapter) &&
4048 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4049 res->max_tx_qs = 1;
4050 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4051 struct be_resources super_nic_res = {0};
4052
4053 /* On a SuperNIC profile, the driver needs to use the
4054 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4055 */
4056 be_cmd_get_profile_config(adapter, &super_nic_res,
4057 RESOURCE_LIMITS, 0);
4058 /* Some old versions of BE3 FW don't report max_tx_qs value */
4059 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4060 } else {
92bf14ab 4061 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4062 }
4063
4064 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4065 !use_sriov && be_physfn(adapter))
4066 res->max_rss_qs = (adapter->be3_native) ?
4067 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4068 res->max_rx_qs = res->max_rss_qs + 1;
4069
e3dc867c 4070 if (be_physfn(adapter))
d3518e21 4071 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4072 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4073 else
4074 res->max_evt_qs = 1;
4075
4076 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4077 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4078 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4079 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4080}
4081
4082static void be_setup_init(struct be_adapter *adapter)
4083{
4084 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4085 adapter->phy.link_speed = -1;
4086 adapter->if_handle = -1;
4087 adapter->be3_native = false;
f66b7cfd 4088 adapter->if_flags = 0;
4089 if (be_physfn(adapter))
4090 adapter->cmd_privileges = MAX_PRIVILEGES;
4091 else
4092 adapter->cmd_privileges = MIN_PRIVILEGES;
4093}
4094
4095static int be_get_sriov_config(struct be_adapter *adapter)
4096{
bec84e6b 4097 struct be_resources res = {0};
d3d18312 4098 int max_vfs, old_vfs;
bec84e6b 4099
f2858738 4100 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4101
ace40aff 4102 /* Some old versions of BE3 FW don't report max_vfs value */
4103 if (BE3_chip(adapter) && !res.max_vfs) {
4104 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4105 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4106 }
4107
d3d18312 4108 adapter->pool_res = res;
bec84e6b 4109
4110 /* If during previous unload of the driver, the VFs were not disabled,
4111 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4112 * Instead use the TotalVFs value stored in the pci-dev struct.
4113 */
4114 old_vfs = pci_num_vf(adapter->pdev);
4115 if (old_vfs) {
4116 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4117 old_vfs);
4118
4119 adapter->pool_res.max_vfs =
4120 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4121 adapter->num_vfs = old_vfs;
4122 }
4123
4124 return 0;
4125}
4126
4127static void be_alloc_sriov_res(struct be_adapter *adapter)
4128{
4129 int old_vfs = pci_num_vf(adapter->pdev);
4130 u16 num_vf_qs;
4131 int status;
4132
4133 be_get_sriov_config(adapter);
4134
4135 if (!old_vfs)
4136 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4137
4138 /* When the HW is in SRIOV capable configuration, the PF-pool
4139 * resources are given to PF during driver load, if there are no
4140 * old VFs. This facility is not available in BE3 FW.
4141 * Also, this is done by FW in Lancer chip.
4142 */
4143 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4144 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4145 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4146 num_vf_qs);
4147 if (status)
4148 dev_err(&adapter->pdev->dev,
4149 "Failed to optimize SRIOV resources\n");
4150 }
4151}
4152
92bf14ab 4153static int be_get_resources(struct be_adapter *adapter)
abb93951 4154{
4155 struct device *dev = &adapter->pdev->dev;
4156 struct be_resources res = {0};
4157 int status;
abb93951 4158
4159 if (BEx_chip(adapter)) {
4160 BEx_get_resources(adapter, &res);
4161 adapter->res = res;
4162 }
4163
4164 /* For Lancer, SH etc read per-function resource limits from FW.
4165 * GET_FUNC_CONFIG returns per function guaranteed limits.
4166 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
4167 */
4168 if (!BEx_chip(adapter)) {
4169 status = be_cmd_get_func_config(adapter, &res);
4170 if (status)
4171 return status;
abb93951 4172
4173 /* If a default RXQ must be created, we'll use up one RSS queue */
4174 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4175 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4176 res.max_rss_qs -= 1;
4177
4178 /* If RoCE may be enabled stash away half the EQs for RoCE */
4179 if (be_roce_supported(adapter))
4180 res.max_evt_qs /= 2;
4181 adapter->res = res;
abb93951 4182 }
4c876616 4183
4184 /* If FW supports RSS default queue, then skip creating non-RSS
4185 * queue for non-IP traffic.
4186 */
4187 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4188 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4189
4190 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4191 be_max_txqs(adapter), be_max_rxqs(adapter),
4192 be_max_rss(adapter), be_max_eqs(adapter),
4193 be_max_vfs(adapter));
4194 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4195 be_max_uc(adapter), be_max_mc(adapter),
4196 be_max_vlans(adapter));
4197
4198 /* Sanitize cfg_num_qs based on HW and platform limits */
4199 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4200 be_max_qs(adapter));
92bf14ab 4201 return 0;
4202}
4203
4204static int be_get_config(struct be_adapter *adapter)
4205{
6b085ba9 4206 int status, level;
542963b7 4207 u16 profile_id;
6b085ba9 4208
e97e3cda 4209 status = be_cmd_query_fw_cfg(adapter);
abb93951 4210 if (status)
92bf14ab 4211 return status;
abb93951 4212
4213 if (BEx_chip(adapter)) {
4214 level = be_cmd_get_fw_log_level(adapter);
4215 adapter->msg_enable =
4216 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4217 }
4218
4219 be_cmd_get_acpi_wol_cap(adapter);
4220
4221 be_cmd_query_port_name(adapter);
4222
4223 if (be_physfn(adapter)) {
4224 status = be_cmd_get_active_profile(adapter, &profile_id);
4225 if (!status)
4226 dev_info(&adapter->pdev->dev,
4227 "Using profile 0x%x\n", profile_id);
962bcb75 4228 }
bec84e6b 4229
4230 status = be_get_resources(adapter);
4231 if (status)
4232 return status;
abb93951 4233
4234 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4235 sizeof(*adapter->pmac_id), GFP_KERNEL);
4236 if (!adapter->pmac_id)
4237 return -ENOMEM;
abb93951 4238
92bf14ab 4239 return 0;
4240}
4241
4242static int be_mac_setup(struct be_adapter *adapter)
4243{
4244 u8 mac[ETH_ALEN];
4245 int status;
4246
4247 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4248 status = be_cmd_get_perm_mac(adapter, mac);
4249 if (status)
4250 return status;
4251
4252 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4253 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4254 }
4255
4256 return 0;
4257}
4258
4259static void be_schedule_worker(struct be_adapter *adapter)
4260{
4261 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4262 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4263}
4264
4265static void be_schedule_err_detection(struct be_adapter *adapter)
4266{
4267 schedule_delayed_work(&adapter->be_err_detection_work,
4268 msecs_to_jiffies(1000));
4269 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4270}
4271
7707133c 4272static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4273{
68d7bdcb 4274 struct net_device *netdev = adapter->netdev;
10ef9ab4 4275 int status;
ba343c77 4276
7707133c 4277 status = be_evt_queues_create(adapter);
4278 if (status)
4279 goto err;
73d540f2 4280
7707133c 4281 status = be_tx_qs_create(adapter);
4282 if (status)
4283 goto err;
10ef9ab4 4284
7707133c 4285 status = be_rx_cqs_create(adapter);
10ef9ab4 4286 if (status)
a54769f5 4287 goto err;
6b7c5b94 4288
7707133c 4289 status = be_mcc_queues_create(adapter);
4290 if (status)
4291 goto err;
4292
4293 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4294 if (status)
4295 goto err;
4296
4297 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4298 if (status)
4299 goto err;
4300
4301 return 0;
4302err:
4303 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4304 return status;
4305}
4306
4307int be_update_queues(struct be_adapter *adapter)
4308{
4309 struct net_device *netdev = adapter->netdev;
4310 int status;
4311
4312 if (netif_running(netdev))
4313 be_close(netdev);
4314
4315 be_cancel_worker(adapter);
4316
4317 /* If any vectors have been shared with RoCE we cannot re-program
4318 * the MSIx table.
4319 */
4320 if (!adapter->num_msix_roce_vec)
4321 be_msix_disable(adapter);
4322
4323 be_clear_queues(adapter);
4324
4325 if (!msix_enabled(adapter)) {
4326 status = be_msix_enable(adapter);
4327 if (status)
4328 return status;
4329 }
4330
4331 status = be_setup_queues(adapter);
4332 if (status)
4333 return status;
4334
4335 be_schedule_worker(adapter);
4336
4337 if (netif_running(netdev))
4338 status = be_open(netdev);
4339
4340 return status;
4341}
4342
4343static inline int fw_major_num(const char *fw_ver)
4344{
4345 int fw_major = 0, i;
4346
4347 i = sscanf(fw_ver, "%d.", &fw_major);
4348 if (i != 1)
4349 return 0;
4350
4351 return fw_major;
4352}
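/* E.g. fw_major_num("4.6.62.0") returns 4. If the version string
 * does not begin with "<digits>.", sscanf() matches nothing and 0
 * is returned, which callers treat as an unknown/old firmware.
 */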
4353
4354/* If any VFs are already enabled don't FLR the PF */
4355static bool be_reset_required(struct be_adapter *adapter)
4356{
4357 return pci_num_vf(adapter->pdev) ? false : true;
4358}
4359
4360/* Wait for the FW to be ready and perform the required initialization */
4361static int be_func_init(struct be_adapter *adapter)
4362{
4363 int status;
4364
4365 status = be_fw_wait_ready(adapter);
4366 if (status)
4367 return status;
4368
4369 if (be_reset_required(adapter)) {
4370 status = be_cmd_reset_function(adapter);
4371 if (status)
4372 return status;
4373
4374 /* Wait for interrupts to quiesce after an FLR */
4375 msleep(100);
4376
4377 /* We can clear all errors when function reset succeeds */
954f6825 4378 be_clear_error(adapter, BE_CLEAR_ALL);
4379 }
4380
4381 /* Tell FW we're ready to fire cmds */
4382 status = be_cmd_fw_init(adapter);
4383 if (status)
4384 return status;
4385
4386 /* Allow interrupts for other ULPs running on NIC function */
4387 be_intr_set(adapter, true);
4388
4389 return 0;
4390}
4391
4392static int be_setup(struct be_adapter *adapter)
4393{
4394 struct device *dev = &adapter->pdev->dev;
bcc84140 4395 u32 en_flags;
7707133c
SP
4396 int status;
4397
4398 status = be_func_init(adapter);
4399 if (status)
4400 return status;
4401
4402 be_setup_init(adapter);
4403
4404 if (!lancer_chip(adapter))
4405 be_cmd_req_native_mode(adapter);
4406
4407 /* Need to invoke this cmd first to get the PCI Function Number */
4408 status = be_cmd_get_cntl_attributes(adapter);
4409 if (status)
4410 return status;
4411
4412 if (!BE2_chip(adapter) && be_physfn(adapter))
4413 be_alloc_sriov_res(adapter);
4414
7707133c 4415 status = be_get_config(adapter);
10ef9ab4 4416 if (status)
a54769f5 4417 goto err;
6b7c5b94 4418
7707133c 4419 status = be_msix_enable(adapter);
10ef9ab4 4420 if (status)
a54769f5 4421 goto err;
6b7c5b94 4422
4423 /* will enable all the needed filter flags in be_open() */
4424 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4425 en_flags = en_flags & be_if_cap_flags(adapter);
4426 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4427 &adapter->if_handle, 0);
7707133c 4428 if (status)
a54769f5 4429 goto err;
6b7c5b94 4430
4431 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4432 rtnl_lock();
7707133c 4433 status = be_setup_queues(adapter);
68d7bdcb 4434 rtnl_unlock();
95046b92 4435 if (status)
4436 goto err;
4437
7707133c 4438 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4439
4440 status = be_mac_setup(adapter);
4441 if (status)
4442 goto err;
4443
e97e3cda 4444 be_cmd_get_fw_ver(adapter);
acbafeb1 4445 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4446
e9e2a904 4447 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4448 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
4449 adapter->fw_ver);
4450 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4451 }
4452
4453 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4454 adapter->rx_fc);
4455 if (status)
4456 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4457 &adapter->rx_fc);
590c391d 4458
4459 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4460 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4461
4462 if (be_physfn(adapter))
4463 be_cmd_set_logical_link_config(adapter,
4464 IFLA_VF_LINK_STATE_AUTO, 0);
4465
4466 if (adapter->num_vfs)
4467 be_vf_setup(adapter);
f9449ab7 4468
4469 status = be_cmd_get_phy_info(adapter);
4470 if (!status && be_pause_supported(adapter))
4471 adapter->phy.fc_autoneg = 1;
4472
68d7bdcb 4473 be_schedule_worker(adapter);
e1ad8e33 4474 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4475 return 0;
4476err:
4477 be_clear(adapter);
4478 return status;
4479}
6b7c5b94 4480
4481#ifdef CONFIG_NET_POLL_CONTROLLER
4482static void be_netpoll(struct net_device *netdev)
4483{
4484 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4485 struct be_eq_obj *eqo;
4486 int i;
4487
e49cc34f 4488 for_all_evt_queues(adapter, eqo, i) {
20947770 4489 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4490 napi_schedule(&eqo->napi);
4491 }
4492}
4493#endif
4494
96c9b2e4 4495static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4496
4497static bool phy_flashing_required(struct be_adapter *adapter)
4498{
e02cfd96 4499 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4500 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4501}
4502
4503static bool is_comp_in_ufi(struct be_adapter *adapter,
4504 struct flash_section_info *fsec, int type)
4505{
4506 int i = 0, img_type = 0;
4507 struct flash_section_info_g2 *fsec_g2 = NULL;
4508
ca34fe38 4509 if (BE2_chip(adapter))
4510 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4511
4512 for (i = 0; i < MAX_FLASH_COMP; i++) {
4513 if (fsec_g2)
4514 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4515 else
4516 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4517
4518 if (img_type == type)
4519 return true;
4520 }
4521 return false;
4522
4523}
4524
4188e7df 4525static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4526 int header_size,
4527 const struct firmware *fw)
4528{
4529 struct flash_section_info *fsec = NULL;
4530 const u8 *p = fw->data;
4531
4532 p += header_size;
4533 while (p < (fw->data + fw->size)) {
4534 fsec = (struct flash_section_info *)p;
4535 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4536 return fsec;
4537 p += 32;
4538 }
4539 return NULL;
4540}
4541
4542static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4543 u32 img_offset, u32 img_size, int hdr_size,
4544 u16 img_optype, bool *crc_match)
4545{
4546 u32 crc_offset;
4547 int status;
4548 u8 crc[4];
4549
4550 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4551 img_size - 4);
4552 if (status)
4553 return status;
4554
4555 crc_offset = hdr_size + img_offset + img_size - 4;
4556
4557 /* Skip flashing, if crc of flashed region matches */
4558 if (!memcmp(crc, p + crc_offset, 4))
4559 *crc_match = true;
4560 else
4561 *crc_match = false;
4562
4563 return status;
4564}
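/* Flash region layout assumed by the CRC check above (illustrative):
 *
 *     |<---------------- img_size ---------------->|
 *     | image payload                      | CRC(4) |
 *     ^ fw->data + hdr_size + img_offset
 *
 * The FW returns the CRC it computes over the first img_size - 4
 * bytes already in flash; if it matches the last 4 bytes of the same
 * image in the UFI file, the region is unchanged and flashing is
 * skipped.
 */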
4565
773a2d7c 4566static int be_flash(struct be_adapter *adapter, const u8 *img,
4567 struct be_dma_mem *flash_cmd, int optype, int img_size,
4568 u32 img_offset)
773a2d7c 4569{
70a7b525 4570 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4571 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4572 int status;
773a2d7c 4573
4574 while (total_bytes) {
4575 num_bytes = min_t(u32, 32*1024, total_bytes);
4576
4577 total_bytes -= num_bytes;
4578
4579 if (!total_bytes) {
4580 if (optype == OPTYPE_PHY_FW)
4581 flash_op = FLASHROM_OPER_PHY_FLASH;
4582 else
4583 flash_op = FLASHROM_OPER_FLASH;
4584 } else {
4585 if (optype == OPTYPE_PHY_FW)
4586 flash_op = FLASHROM_OPER_PHY_SAVE;
4587 else
4588 flash_op = FLASHROM_OPER_SAVE;
4589 }
4590
be716446 4591 memcpy(req->data_buf, img, num_bytes);
4592 img += num_bytes;
4593 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4594 flash_op, img_offset +
4595 bytes_sent, num_bytes);
4c60005f 4596 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4597 optype == OPTYPE_PHY_FW)
4598 break;
4599 else if (status)
773a2d7c 4600 return status;
4601
4602 bytes_sent += num_bytes;
4603 }
4604 return 0;
4605}
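/* Chunking behaviour of be_flash() above: every 32KB chunk except
 * the last is written with a SAVE op (staged by the FW) and the
 * final chunk uses a FLASH op, which commits the whole image. E.g.
 * a hypothetical 100KB image is sent as three 32KB SAVE writes
 * followed by one 4KB FLASH write.
 */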
4606
0ad3157e 4607/* For BE2, BE3 and BE3-R */
ca34fe38 4608static int be_flash_BEx(struct be_adapter *adapter,
4609 const struct firmware *fw,
4610 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4611{
c165541e 4612 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4613 struct device *dev = &adapter->pdev->dev;
c165541e 4614 struct flash_section_info *fsec = NULL;
4615 int status, i, filehdr_size, num_comp;
4616 const struct flash_comp *pflashcomp;
4617 bool crc_match;
4618 const u8 *p;
4619
4620 struct flash_comp gen3_flash_types[] = {
4621 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4622 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4623 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4624 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4625 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4626 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4627 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4628 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4629 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4630 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4631 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4632 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4633 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4634 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4635 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4636 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4637 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4638 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4639 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4640 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4641 };
4642
4643 struct flash_comp gen2_flash_types[] = {
4644 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4645 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4646 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4647 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4648 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4649 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4650 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4651 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4652 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4653 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4654 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4655 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4656 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4657 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4658 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4659 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4660 };
4661
ca34fe38 4662 if (BE3_chip(adapter)) {
4663 pflashcomp = gen3_flash_types;
4664 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4665 num_comp = ARRAY_SIZE(gen3_flash_types);
4666 } else {
4667 pflashcomp = gen2_flash_types;
4668 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4669 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4670 img_hdrs_size = 0;
84517482 4671 }
ca34fe38 4672
4673 /* Get flash section info */
4674 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4675 if (!fsec) {
96c9b2e4 4676 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4677 return -1;
4678 }
9fe96934 4679 for (i = 0; i < num_comp; i++) {
c165541e 4680 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4681 continue;
4682
4683 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4684 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4685 continue;
4686
4687 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4688 !phy_flashing_required(adapter))
306f1348 4689 continue;
c165541e 4690
773a2d7c 4691 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4692 status = be_check_flash_crc(adapter, fw->data,
4693 pflashcomp[i].offset,
4694 pflashcomp[i].size,
4695 filehdr_size +
4696 img_hdrs_size,
4697 OPTYPE_REDBOOT, &crc_match);
4698 if (status) {
4699 dev_err(dev,
4700 "Could not get CRC for 0x%x region\n",
4701 pflashcomp[i].optype);
4702 continue;
4703 }
4704
4705 if (crc_match)
4706 continue;
4707 }
c165541e 4708
4709 p = fw->data + filehdr_size + pflashcomp[i].offset +
4710 img_hdrs_size;
4711 if (p + pflashcomp[i].size > fw->data + fw->size)
4712 return -1;
4713
4714 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4715 pflashcomp[i].size, 0);
773a2d7c 4716 if (status) {
96c9b2e4 4717 dev_err(dev, "Flashing section type 0x%x failed\n",
4718 pflashcomp[i].img_type);
4719 return status;
84517482 4720 }
84517482 4721 }
4722 return 0;
4723}
4724
4725static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4726{
4727 u32 img_type = le32_to_cpu(fsec_entry.type);
4728 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4729
4730 if (img_optype != 0xFFFF)
4731 return img_optype;
4732
4733 switch (img_type) {
4734 case IMAGE_FIRMWARE_iSCSI:
4735 img_optype = OPTYPE_ISCSI_ACTIVE;
4736 break;
4737 case IMAGE_BOOT_CODE:
4738 img_optype = OPTYPE_REDBOOT;
4739 break;
4740 case IMAGE_OPTION_ROM_ISCSI:
4741 img_optype = OPTYPE_BIOS;
4742 break;
4743 case IMAGE_OPTION_ROM_PXE:
4744 img_optype = OPTYPE_PXE_BIOS;
4745 break;
4746 case IMAGE_OPTION_ROM_FCoE:
4747 img_optype = OPTYPE_FCOE_BIOS;
4748 break;
4749 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4750 img_optype = OPTYPE_ISCSI_BACKUP;
4751 break;
4752 case IMAGE_NCSI:
4753 img_optype = OPTYPE_NCSI_FW;
4754 break;
4755 case IMAGE_FLASHISM_JUMPVECTOR:
4756 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4757 break;
4758 case IMAGE_FIRMWARE_PHY:
4759 img_optype = OPTYPE_SH_PHY_FW;
4760 break;
4761 case IMAGE_REDBOOT_DIR:
4762 img_optype = OPTYPE_REDBOOT_DIR;
4763 break;
4764 case IMAGE_REDBOOT_CONFIG:
4765 img_optype = OPTYPE_REDBOOT_CONFIG;
4766 break;
4767 case IMAGE_UFI_DIR:
4768 img_optype = OPTYPE_UFI_DIR;
4769 break;
4770 default:
4771 break;
4772 }
4773
4774 return img_optype;
4775}
4776
773a2d7c 4777static int be_flash_skyhawk(struct be_adapter *adapter,
4778 const struct firmware *fw,
4779 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4780{
773a2d7c 4781 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4782 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4783 struct device *dev = &adapter->pdev->dev;
773a2d7c 4784 struct flash_section_info *fsec = NULL;
96c9b2e4 4785 u32 img_offset, img_size, img_type;
70a7b525 4786 u16 img_optype, flash_optype;
96c9b2e4 4787 int status, i, filehdr_size;
96c9b2e4 4788 const u8 *p;
4789
4790 filehdr_size = sizeof(struct flash_file_hdr_g3);
4791 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4792 if (!fsec) {
96c9b2e4 4793 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4794 return -EINVAL;
4795 }
4796
70a7b525 4797retry_flash:
4798 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4799 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4800 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4801 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4802 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4803 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4804
96c9b2e4 4805 if (img_optype == 0xFFFF)
773a2d7c 4806 continue;
4807
4808 if (flash_offset_support)
4809 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4810 else
4811 flash_optype = img_optype;
4812
4813 /* Don't bother verifying CRC if an old FW image is being
4814 * flashed
4815 */
4816 if (old_fw_img)
4817 goto flash;
4818
4819 status = be_check_flash_crc(adapter, fw->data, img_offset,
4820 img_size, filehdr_size +
70a7b525 4821 img_hdrs_size, flash_optype,
96c9b2e4 4822 &crc_match);
4823 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4824 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4825 /* The current FW image on the card does not support
4826 * OFFSET based flashing. Retry using older mechanism
4827 * of OPTYPE based flashing
4828 */
4829 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4830 flash_offset_support = false;
4831 goto retry_flash;
4832 }
4833
4834 /* The current FW image on the card does not recognize
4835 * the new FLASH op_type. The FW download is partially
4836 * complete. Reboot the server now to enable FW image
4837 * to recognize the new FLASH op_type. To complete the
4838 * remaining process, download the same FW again after
4839 * the reboot.
4840 */
4841 dev_err(dev, "Flash incomplete. Reset the server\n");
4842 dev_err(dev, "Download FW image again after reset\n");
4843 return -EAGAIN;
4844 } else if (status) {
4845 dev_err(dev, "Could not get CRC for 0x%x region\n",
4846 img_optype);
4847 return -EFAULT;
4848 }
4849
4850 if (crc_match)
4851 continue;
773a2d7c 4852
4853flash:
4854 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4855 if (p + img_size > fw->data + fw->size)
4856 return -1;
4857
4858 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4859 img_offset);
4860
4861 /* The current FW image on the card does not support OFFSET
4862 * based flashing. Retry using older mechanism of OPTYPE based
4863 * flashing
4864 */
4865 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4866 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4867 flash_offset_support = false;
4868 goto retry_flash;
4869 }
4870
4871 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4872 * UFI_DIR region
4873 */
4874 if (old_fw_img &&
4875 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4876 (img_optype == OPTYPE_UFI_DIR &&
4877 base_status(status) == MCC_STATUS_FAILED))) {
4878 continue;
4879 } else if (status) {
4880 dev_err(dev, "Flashing section type 0x%x failed\n",
4881 img_type);
4882 return -EFAULT;
4883 }
4884 }
4885 return 0;
4886}
4887
485bf569 4888static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4889 const struct firmware *fw)
84517482 4890{
4891#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4892#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4893 struct device *dev = &adapter->pdev->dev;
84517482 4894 struct be_dma_mem flash_cmd;
4895 const u8 *data_ptr = NULL;
4896 u8 *dest_image_ptr = NULL;
4897 size_t image_size = 0;
4898 u32 chunk_size = 0;
4899 u32 data_written = 0;
4900 u32 offset = 0;
4901 int status = 0;
4902 u8 add_status = 0;
f67ef7ba 4903 u8 change_status;
84517482 4904
485bf569 4905 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4906 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4907 return -EINVAL;
4908 }
4909
4910 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4911 + LANCER_FW_DOWNLOAD_CHUNK;
4912 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4913 &flash_cmd.dma, GFP_KERNEL);
4914 if (!flash_cmd.va)
4915 return -ENOMEM;
84517482 4916
4917 dest_image_ptr = flash_cmd.va +
4918 sizeof(struct lancer_cmd_req_write_object);
4919 image_size = fw->size;
4920 data_ptr = fw->data;
4921
4922 while (image_size) {
4923 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4924
4925 /* Copy the image chunk content. */
4926 memcpy(dest_image_ptr, data_ptr, chunk_size);
4927
4928 status = lancer_cmd_write_object(adapter, &flash_cmd,
4929 chunk_size, offset,
4930 LANCER_FW_DOWNLOAD_LOCATION,
4931 &data_written, &change_status,
4932 &add_status);
4933 if (status)
4934 break;
4935
4936 offset += data_written;
4937 data_ptr += data_written;
4938 image_size -= data_written;
4939 }
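 /* E.g. a hypothetical 1MB image is streamed as 32 x 32KB
  * write_object chunks at increasing offsets; the zero-length
  * write below then commits the downloaded image.
  */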
4940
4941 if (!status) {
4942 /* Commit the FW written */
4943 status = lancer_cmd_write_object(adapter, &flash_cmd,
4944 0, offset,
4945 LANCER_FW_DOWNLOAD_LOCATION,
4946 &data_written, &change_status,
4947 &add_status);
4948 }
4949
bb864e07 4950 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4951 if (status) {
bb864e07 4952 dev_err(dev, "Firmware load error\n");
3fb8cb80 4953 return be_cmd_status(status);
4954 }
4955
4956 dev_info(dev, "Firmware flashed successfully\n");
4957
f67ef7ba 4958 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4959 dev_info(dev, "Resetting adapter to activate new FW\n");
4960 status = lancer_physdev_ctrl(adapter,
4961 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4962 if (status) {
4963 dev_err(dev, "Adapter busy, could not reset FW\n");
4964 dev_err(dev, "Reboot server to activate new FW\n");
4965 }
4966 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4967 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4968 }
4969
4970 return 0;
4971}
4972
4973/* Check if the flash image file is compatible with the adapter that
4974 * is being flashed.
4975 */
4976static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4977 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4978{
4979 if (!fhdr) {
4980 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
887a65c4 4981 return false;
5d3acd0d 4982 }
773a2d7c 4983
4984 /* First letter of the build version is used to identify
4985 * which chip this image file is meant for.
4986 */
4987 switch (fhdr->build[0]) {
4988 case BLD_STR_UFI_TYPE_SH:
4989 if (!skyhawk_chip(adapter))
4990 return false;
4991 break;
5d3acd0d 4992 case BLD_STR_UFI_TYPE_BE3:
4993 if (!BE3_chip(adapter))
4994 return false;
4995 break;
5d3acd0d 4996 case BLD_STR_UFI_TYPE_BE2:
4997 if (!BE2_chip(adapter))
4998 return false;
4999 break;
5000 default:
5001 return false;
5002 }
a6e6ff6e 5003
5004 /* In BE3 FW images the "asic_type_rev" field doesn't track the
5005 * asic_rev of the chips it is compatible with.
5006 * When asic_type_rev is 0 the image is compatible only with
5007 * pre-BE3-R chips (asic_rev < 0x10)
5008 */
5009 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
5010 return adapter->asic_rev < 0x10;
5011 else
5012 return (fhdr->asic_type_rev >= adapter->asic_rev);
5013}
5014
5015static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
5016{
5d3acd0d 5017 struct device *dev = &adapter->pdev->dev;
485bf569 5018 struct flash_file_hdr_g3 *fhdr3;
5019 struct image_hdr *img_hdr_ptr;
5020 int status = 0, i, num_imgs;
485bf569 5021 struct be_dma_mem flash_cmd;
84517482 5022
5023 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
5024 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
5025 dev_err(dev, "Flash image is not compatible with adapter\n");
5026 return -EINVAL;
5027 }
5028
5d3acd0d 5029 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
5030 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
5031 GFP_KERNEL);
5032 if (!flash_cmd.va)
5033 return -ENOMEM;
773a2d7c 5034
773a2d7c
PR
5035 num_imgs = le32_to_cpu(fhdr3->num_imgs);
5036 for (i = 0; i < num_imgs; i++) {
5037 img_hdr_ptr = (struct image_hdr *)(fw->data +
5038 (sizeof(struct flash_file_hdr_g3) +
5039 i * sizeof(struct image_hdr)));
5d3acd0d
VV
5040 if (!BE2_chip(adapter) &&
5041 le32_to_cpu(img_hdr_ptr->imageid) != 1)
5042 continue;
84517482 5043
5d3acd0d
VV
5044 if (skyhawk_chip(adapter))
5045 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
5046 num_imgs);
5047 else
5048 status = be_flash_BEx(adapter, fw, &flash_cmd,
5049 num_imgs);
84517482
AK
5050 }
5051
5d3acd0d
VV
5052 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
5053 if (!status)
5054 dev_info(dev, "Firmware flashed successfully\n");
84517482 5055
485bf569
SN
5056 return status;
5057}
5058
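/* Entry point for firmware flashing. In practice this is reached through
 * the driver's ethtool flash op (e.g. "ethtool -f <iface> <file>.ufi");
 * the user-space invocation shown is illustrative, not mandated here.
 */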
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

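/* VEB keeps VF-to-VF switching inside the adapter; VEPA hairpins all such
 * traffic through the external switch. One typical (illustrative) way to
 * select the mode from user space is iproute2's bridge tool, e.g.
 * "bridge link set dev <pf-iface> hwmode vepa", which lands here via
 * ndo_bridge_setlink with an IFLA_BRIDGE_MODE attribute.
 */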
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

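/* Sketch of the geometry check below: for a well-formed VxLAN frame the
 * inner MAC header starts exactly one UDP header (8 bytes) plus one VxLAN
 * header (8 bytes) past the outer transport header. Anything else is some
 * other encapsulation and loses the tunnel offload flags.
 */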
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

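/* The physical port id built below is laid out as: byte 0 carries the
 * 1-based HBA port number, followed by the controller serial-number words
 * copied in reverse order, for a total of id_len bytes.
 */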
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

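/* Periodic error-detection worker: checks the adapter for HW errors and,
 * on detection, tears the interface down. Full in-driver recovery is
 * attempted only on Lancer; on other chips detection keeps running via
 * rescheduling, and VFs always keep retrying recovery.
 */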
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

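/* Housekeeping worker, rescheduled roughly once a second (see the
 * msecs_to_jiffies(1000) at the end). Per pass it reaps MCC completions
 * when the interface is down, refreshes stats, replenishes starved RX
 * queues and, on the PF, reads the die temperature every
 * be_get_temp_freq (64) passes.
 */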
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

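/* BAR usage, as implemented below: on BEx PFs the CSR space is BAR 2;
 * doorbells live in BAR 0 on Lancer and VFs and in BAR 4 otherwise (see
 * db_bar()); PCICFG is BAR 1 on BE2 and BAR 0 on BE3/Skyhawk PFs, while
 * VFs reach it at a fixed offset within the doorbell BAR.
 */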
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
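
/* The sensor registered in be_probe() appears under the standard hwmon
 * sysfs tree; e.g. reading a (hypothetical) path like
 * /sys/class/hwmon/hwmonX/temp1_input returns the die temperature in
 * millidegrees Celsius, or -EIO while no valid sample is available.
 */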

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover, so wait for the dump to finish.
	 * Wait only on the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

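/* Invoked through the PCI core's sriov_configure hook; from user space
 * this is typically driven by writing the desired VF count to sysfs,
 * e.g. "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" (path shown for
 * illustration). Writing 0 tears the VFs down.
 */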
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

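/* Module-load sketch: rx_frag_size may be set at load time, e.g.
 * "modprobe be2net rx_frag_size=4096" (illustrative invocation); values
 * other than 2048/4096/8192 are rejected and fall back to 2048. num_vfs
 * is still accepted for compatibility but is obsolete; use the
 * sriov_numvfs sysfs interface instead.
 */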
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);