be2net: Declare some u16 fields as u32 to improve performance
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
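
/* Illustrative usage sketch (added for clarity; not part of the original
 * source): callers pair be_queue_alloc()/be_queue_free() around queue
 * create/destroy, along the lines of:
 *
 *	rc = be_queue_alloc(adapter, &eqo->q, EVNT_Q_LEN,
 *			    sizeof(struct be_eq_entry));
 *	if (rc)
 *		return rc;
 *	...
 *	be_queue_free(adapter, &eqo->q);
 *
 * EVNT_Q_LEN and struct be_eq_entry are assumed here from be.h; the exact
 * lengths and entry types vary per queue type (EQ, CQ, RQ, TXQ, MCCQ).
 */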

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
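
/* Note (added for clarity; not from the original source): each notify
 * helper above packs one 32-bit doorbell word - the ring id in the low
 * bits plus shifted count/flag fields - and writes it to the doorbell
 * area mapped at adapter->db. For example, re-arming CQ 5 after popping
 * 3 completions writes
 * (5 | 1 << DB_CQ_REARM_SHIFT | 3 << DB_CQ_NUM_POPPED_SHIFT) to
 * DB_CQ_OFFSET. The wmb() in the RQ/TXQ paths orders descriptor updates
 * before the doorbell write.
 */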

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
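
/* Worked example (added for clarity; not from the original source): with
 * *acc == 0x0001FFF0 and a fresh HW reading val == 0x0005, val < lo(*acc)
 * means the 16-bit HW counter wrapped; newacc = 0x00010000 + 0x5 + 65536
 * = 0x00020005, so the 32-bit accumulator keeps growing monotonically
 * across HW roll-overs at 65535.
 */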

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* this erx HW counter can actually wrap around after
		 * 65535; the driver accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
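
/* Example (added for clarity; not from the original source): a TSO skb
 * with linear header data and two page fragments needs
 * 1 (header WRB) + 1 (linear data) + 2 (frags) = 4 WRBs; a small fully
 * linear skb needs just 2 (header WRB + one data WRB).
 */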

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}
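
/* Example (added for clarity; not from the original source): for a tag of
 * 0x6064 (priority 3, VID 100), if bit 3 is clear in
 * adapter->vlan_prio_bmap the priority field is replaced with
 * adapter->recommended_prio_bits while the 12-bit VID is preserved.
 */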

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
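
/* Note (added for clarity; not from the original source): in QnQ mode an
 * untagged packet passing through here first gets the inner pvid tag
 * inserted, then the outer adapter->qnq_vid tag, with VLAN_SKIP_HW set so
 * the ASIC does not stack yet another tag on top.
 */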

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
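
/* Note (added for clarity; not from the original source): on non-Lancer
 * chips, if the number of pending WRBs is odd (say 3), one all-zero dummy
 * WRB is appended and num_wrb in the last header WRB is bumped, so the
 * count reported through the TX doorbell is always even.
 */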

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
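
/* Example (added for clarity; not from the original source): if only
 * VLANs 10 and 20 are set in the adapter->vids bitmap, the loop above
 * builds vids[] = { cpu_to_le16(10), cpu_to_le16(20) } with num == 2
 * before handing the table to firmware via be_cmd_vlan_config().
 */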
1433
80d5c368 1434static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1435{
1436 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1437 int status = 0;
6b7c5b94 1438
a85e9986
PR
1439 /* Packets with VID 0 are always received by Lancer by default */
1440 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1441 return status;
1442
f6cbd364 1443 if (test_bit(vid, adapter->vids))
48291c22 1444 return status;
a85e9986 1445
f6cbd364 1446 set_bit(vid, adapter->vids);
a6b74e01 1447 adapter->vlans_added++;
8e586137 1448
a6b74e01
SK
1449 status = be_vid_config(adapter);
1450 if (status) {
1451 adapter->vlans_added--;
f6cbd364 1452 clear_bit(vid, adapter->vids);
a6b74e01 1453 }
48291c22 1454
80817cbf 1455 return status;
6b7c5b94
SP
1456}
1457
80d5c368 1458static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1459{
1460 struct be_adapter *adapter = netdev_priv(netdev);
1461
a85e9986
PR
1462 /* Packets with VID 0 are always received by Lancer by default */
1463 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1464 return 0;
a85e9986 1465
41dcdfbd
SB
1466 if (!test_bit(vid, adapter->vids))
1467 return 0;
1468
f6cbd364 1469 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1470 adapter->vlans_added--;
1471
1472 return be_vid_config(adapter);
6b7c5b94
SP
1473}
1474
f66b7cfd 1475static void be_clear_all_promisc(struct be_adapter *adapter)
7ad09458 1476{
ac34b743 1477 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
f66b7cfd 1478 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
7ad09458
S
1479}
1480
f66b7cfd
SP
1481static void be_set_all_promisc(struct be_adapter *adapter)
1482{
1483 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1484 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1485}
1486
1487static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1488{
0fc16ebf 1489 int status;
6b7c5b94 1490
f66b7cfd
SP
1491 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1492 return;
6b7c5b94 1493
f66b7cfd
SP
1494 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1495 if (!status)
1496 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1497}
1498
1499static void be_set_mc_list(struct be_adapter *adapter)
1500{
1501 int status;
1502
1503 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1504 if (!status)
1505 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1506 else
1507 be_set_mc_promisc(adapter);
1508}
1509
1510static void be_set_uc_list(struct be_adapter *adapter)
1511{
1512 struct netdev_hw_addr *ha;
1513 int i = 1; /* First slot is claimed by the Primary MAC */
1514
1515 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1516 be_cmd_pmac_del(adapter, adapter->if_handle,
1517 adapter->pmac_id[i], 0);
1518
1519 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1520 be_set_all_promisc(adapter);
1521 return;
6b7c5b94
SP
1522 }
1523
f66b7cfd
SP
1524 netdev_for_each_uc_addr(ha, adapter->netdev) {
1525 adapter->uc_macs++; /* First slot is for Primary MAC */
1526 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1527 &adapter->pmac_id[adapter->uc_macs], 0);
1528 }
1529}
6b7c5b94 1530
f66b7cfd
SP
1531static void be_clear_uc_list(struct be_adapter *adapter)
1532{
1533 int i;
fbc13f01 1534
f66b7cfd
SP
1535 for (i = 1; i < (adapter->uc_macs + 1); i++)
1536 be_cmd_pmac_del(adapter, adapter->if_handle,
1537 adapter->pmac_id[i], 0);
1538 adapter->uc_macs = 0;
1539}
fbc13f01 1540
f66b7cfd
SP
1541static void be_set_rx_mode(struct net_device *netdev)
1542{
1543 struct be_adapter *adapter = netdev_priv(netdev);
fbc13f01 1544
f66b7cfd
SP
1545 if (netdev->flags & IFF_PROMISC) {
1546 be_set_all_promisc(adapter);
1547 return;
fbc13f01
AK
1548 }
1549
f66b7cfd
SP
1550 /* Interface was previously in promiscuous mode; disable it */
1551 if (be_in_all_promisc(adapter)) {
1552 be_clear_all_promisc(adapter);
1553 if (adapter->vlans_added)
1554 be_vid_config(adapter);
0fc16ebf 1555 }
a0794885 1556
f66b7cfd
SP
1557 /* Enable multicast promisc if num configured exceeds what we support */
1558 if (netdev->flags & IFF_ALLMULTI ||
1559 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1560 be_set_mc_promisc(adapter);
a0794885 1561 return;
f66b7cfd 1562 }
a0794885 1563
f66b7cfd
SP
1564 if (netdev_uc_count(netdev) != adapter->uc_macs)
1565 be_set_uc_list(adapter);
1566
1567 be_set_mc_list(adapter);
6b7c5b94
SP
1568}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
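
/* ndo_set_vf_vlan handler. Invoked from the PF, typically via a command
 * such as "ip link set <pf-ifname> vf 0 vlan 100 qos 3" (illustrative
 * values). A non-zero vlan/qos enables transparent tagging for the VF;
 * vlan 0 with qos 0 disables it.
 */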
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QoS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
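
/* ndo_set_vf_link_state handler: link_state is one of the
 * IFLA_VF_LINK_STATE_{AUTO,ENABLE,DISABLE} values, e.g. as set via
 * "ip link set <pf-ifname> vf 0 state auto" (illustrative command).
 */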
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
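
/* Adaptive interrupt coalescing (AIC): derive a new EQ delay from the
 * aggregate RX/TX packet rate seen since the last sample. For example,
 * at roughly 150,000 pkts/sec the formula below yields
 * eqd = (150000 / 15000) << 2 = 40, which is then clamped to the
 * [min_eqd, max_eqd] range (illustrative arithmetic).
 */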
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip if the counters wrapped around, or on the first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}

/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
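
/* Pop the RX page-info entry at the queue tail. A "big page" is shared
 * by multiple RX fragments; the page is DMA-unmapped only on its last
 * fragment, while earlier fragments are just synced for CPU access.
 */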
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
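
/* With the default rx_frag_size of 2048 and 4K pages, big_page_size
 * works out to PAGE_SIZE and each "big page" is carved into two RX
 * fragments; larger rx_frag_size values use higher-order allocations
 * (illustrative arithmetic for the sizing logic below).
 */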
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
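
/* Unmap and free all wrbs (the header wrb plus one wrb per fragment)
 * for the request(s) completed up to last_index, and return the number
 * of wrbs reclaimed so the caller can adjust the TXQ accounting.
 */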
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
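
/* Drain TX completions from all TXQs, polling until the HW has been
 * silent for ~10ms, then reclaim any wrbs that were enqueued but never
 * notified to the HW and reset the queue indices accordingly.
 */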
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}
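
/* Consume up to 'budget' RX completions from the RX CQ, handing frames
 * to GRO or the regular receive path, and replenish RX buffers when the
 * queue runs low. Returns the number of completions processed so the
 * NAPI core knows whether the budget was exhausted.
 */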
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}

static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
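
/* NAPI poll handler shared by all EQs: count pending events, reap TX
 * completions, process RX within the budget, service the MCC queue on
 * its EQ, and re-arm the EQ only when the budget was not exhausted.
 */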
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
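
/* As an example of the sizing above: on an 8-CPU host with RoCE support
 * and be_max_eqs() of 16, the driver requests min(32, 16) = 16 vectors;
 * if all are granted, half go to RoCE and half to the NIC queues
 * (illustrative numbers, not a fixed configuration).
 */
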
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3313
3314static int be_irq_register(struct be_adapter *adapter)
3315{
3316 struct net_device *netdev = adapter->netdev;
3317 int status;
3318
ac6a0c4a 3319 if (msix_enabled(adapter)) {
6b7c5b94
SP
3320 status = be_msix_register(adapter);
3321 if (status == 0)
3322 goto done;
ba343c77 3323 /* INTx is not supported for VF */
18c57c74 3324 if (be_virtfn(adapter))
ba343c77 3325 return status;
6b7c5b94
SP
3326 }
3327
e49cc34f 3328 /* INTx: only the first EQ is used */
6b7c5b94
SP
3329 netdev->irq = adapter->pdev->irq;
3330 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3331 &adapter->eq_obj[0]);
6b7c5b94
SP
3332 if (status) {
3333 dev_err(&adapter->pdev->dev,
3334 "INTx request IRQ failed - err %d\n", status);
3335 return status;
3336 }
3337done:
3338 adapter->isr_registered = true;
3339 return 0;
3340}
3341
3342static void be_irq_unregister(struct be_adapter *adapter)
3343{
3344 struct net_device *netdev = adapter->netdev;
10ef9ab4 3345 struct be_eq_obj *eqo;
d658d98a 3346 int i, vec;
6b7c5b94
SP
3347
3348 if (!adapter->isr_registered)
3349 return;
3350
3351 /* INTx */
ac6a0c4a 3352 if (!msix_enabled(adapter)) {
e49cc34f 3353 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3354 goto done;
3355 }
3356
3357 /* MSIx */
d658d98a
PR
3358 for_all_evt_queues(adapter, eqo, i) {
3359 vec = be_msix_vec_get(adapter, eqo);
3360 irq_set_affinity_hint(vec, NULL);
3361 free_irq(vec, eqo);
3362 }
3abcdeda 3363
6b7c5b94
SP
3364done:
3365 adapter->isr_registered = false;
6b7c5b94
SP
3366}
3367
10ef9ab4 3368static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3369{
62219066 3370 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3371 struct be_queue_info *q;
3372 struct be_rx_obj *rxo;
3373 int i;
3374
3375 for_all_rx_queues(adapter, rxo, i) {
3376 q = &rxo->q;
3377 if (q->created) {
99b44304
KA
3378 /* If RXQs are destroyed while in an "out of buffer"
3379 * state, there is a possibility of an HW stall on
3380 * Lancer. So, post 64 buffers to each queue to relieve
3381 * the "out of buffer" condition.
3382 * Make sure there's space in the RXQ before posting.
3383 */
3384 if (lancer_chip(adapter)) {
3385 be_rx_cq_clean(rxo);
3386 if (atomic_read(&q->used) == 0)
3387 be_post_rx_frags(rxo, GFP_KERNEL,
3388 MAX_RX_POST);
3389 }
3390
482c9e79 3391 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3392 be_rx_cq_clean(rxo);
99b44304 3393 be_rxq_clean(rxo);
482c9e79 3394 }
10ef9ab4 3395 be_queue_free(adapter, q);
482c9e79 3396 }
62219066
AK
3397
3398 if (rss->rss_flags) {
3399 rss->rss_flags = RSS_ENABLE_NONE;
3400 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3401 128, rss->rss_hkey);
3402 }
482c9e79
SP
3403}
3404
bcc84140
KA
3405static void be_disable_if_filters(struct be_adapter *adapter)
3406{
3407 be_cmd_pmac_del(adapter, adapter->if_handle,
3408 adapter->pmac_id[0], 0);
3409
3410 be_clear_uc_list(adapter);
3411
3412 /* The IFACE flags are enabled in the open path and cleared
3413 * in the close path. When a VF gets detached from the host and
3414 * assigned to a VM the following happens:
3415 * - VF's IFACE flags get cleared in the detach path
3416 * - IFACE create is issued by the VF in the attach path
3417 * Due to a bug in the BE3/Skyhawk-R FW
3418 * (Lancer FW doesn't have the bug), the IFACE capability flags
3419 * specified along with the IFACE create cmd issued by a VF are not
3420 * honoured by FW. As a consequence, if a *new* driver
3421 * (that enables/disables IFACE flags in open/close)
3422 * is loaded in the host and an *old* driver is used by a VM/VF,
3423 * the IFACE gets created *without* the needed flags.
3424 * To avoid this, disable RX-filter flags only for Lancer.
3425 */
3426 if (lancer_chip(adapter)) {
3427 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3428 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3429 }
3430}
3431
889cd4b2
SP
3432static int be_close(struct net_device *netdev)
3433{
3434 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3435 struct be_eq_obj *eqo;
3436 int i;
889cd4b2 3437
e1ad8e33
KA
3438 /* This protection is needed as be_close() may be called even when the
3439 * adapter is in a cleared state (after an EEH permanent failure)
3440 */
3441 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3442 return 0;
3443
bcc84140
KA
3444 be_disable_if_filters(adapter);
3445
dff345c5
IV
3446 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3447 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3448 napi_disable(&eqo->napi);
6384a4d0
SP
3449 be_disable_busy_poll(eqo);
3450 }
71237b6f 3451 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3452 }
a323d9bf
SP
3453
3454 be_async_mcc_disable(adapter);
3455
3456 /* Wait for all pending tx completions to arrive so that
3457 * all tx skbs are freed.
3458 */
fba87559 3459 netif_tx_disable(netdev);
6e1f9975 3460 be_tx_compl_clean(adapter);
a323d9bf
SP
3461
3462 be_rx_qs_destroy(adapter);
d11a347d 3463
a323d9bf 3464 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3465 if (msix_enabled(adapter))
3466 synchronize_irq(be_msix_vec_get(adapter, eqo));
3467 else
3468 synchronize_irq(netdev->irq);
3469 be_eq_clean(eqo);
63fcb27f
PR
3470 }
3471
889cd4b2
SP
3472 be_irq_unregister(adapter);
3473
482c9e79
SP
3474 return 0;
3475}
3476
10ef9ab4 3477static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3478{
1dcf7b1c
ED
3479 struct rss_info *rss = &adapter->rss_info;
3480 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3481 struct be_rx_obj *rxo;
e9008ee9 3482 int rc, i, j;
482c9e79
SP
3483
3484 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3485 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3486 sizeof(struct be_eth_rx_d));
3487 if (rc)
3488 return rc;
3489 }
3490
71bb8bd0
VV
3491 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3492 rxo = default_rxo(adapter);
3493 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3494 rx_frag_size, adapter->if_handle,
3495 false, &rxo->rss_id);
3496 if (rc)
3497 return rc;
3498 }
10ef9ab4
SP
3499
3500 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3501 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3502 rx_frag_size, adapter->if_handle,
3503 true, &rxo->rss_id);
482c9e79
SP
3504 if (rc)
3505 return rc;
3506 }
3507
3508 if (be_multi_rxq(adapter)) {
71bb8bd0 3509 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3510 for_all_rss_queues(adapter, rxo, i) {
e2557877 3511 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3512 break;
e2557877
VD
3513 rss->rsstable[j + i] = rxo->rss_id;
3514 rss->rss_queue[j + i] = i;
e9008ee9
PR
3515 }
3516 }
e2557877
VD
3517 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3518 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3519
3520 if (!BEx_chip(adapter))
e2557877
VD
3521 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3522 RSS_ENABLE_UDP_IPV6;
62219066
AK
3523
3524 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3525 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3526 RSS_INDIR_TABLE_LEN, rss_key);
3527 if (rc) {
3528 rss->rss_flags = RSS_ENABLE_NONE;
3529 return rc;
3530 }
3531
3532 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3533 } else {
3534 /* Disable RSS, if only default RX Q is created */
e2557877 3535 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3536 }
594ad54a 3537
e2557877 3538
b02e60c8
SR
3539 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3540 * which is a queue empty condition
3541 */
10ef9ab4 3542 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3543 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3544
889cd4b2
SP
3545 return 0;
3546}
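/* Indirection-table sketch (assuming RSS_INDIR_TABLE_LEN is 128): with
 * 4 RSS queues the outer loop above strides by num_rss_qs, so rsstable[]
 * ends up as the pattern q0,q1,q2,q3 repeated 32 times, and rss_queue[]
 * holds the matching queue indices 0,1,2,3.
 */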
3547
bcc84140
KA
3548static int be_enable_if_filters(struct be_adapter *adapter)
3549{
3550 int status;
3551
3552 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3553 if (status)
3554 return status;
3555
3556 /* For BE3 VFs, the PF programs the initial MAC address */
3557 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3558 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3559 adapter->if_handle,
3560 &adapter->pmac_id[0], 0);
3561 if (status)
3562 return status;
3563 }
3564
3565 if (adapter->vlans_added)
3566 be_vid_config(adapter);
3567
3568 be_set_rx_mode(adapter->netdev);
3569
3570 return 0;
3571}
3572
6b7c5b94
SP
3573static int be_open(struct net_device *netdev)
3574{
3575 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3576 struct be_eq_obj *eqo;
3abcdeda 3577 struct be_rx_obj *rxo;
10ef9ab4 3578 struct be_tx_obj *txo;
b236916a 3579 u8 link_status;
3abcdeda 3580 int status, i;
5fb379ee 3581
10ef9ab4 3582 status = be_rx_qs_create(adapter);
482c9e79
SP
3583 if (status)
3584 goto err;
3585
bcc84140
KA
3586 status = be_enable_if_filters(adapter);
3587 if (status)
3588 goto err;
3589
c2bba3df
SK
3590 status = be_irq_register(adapter);
3591 if (status)
3592 goto err;
5fb379ee 3593
10ef9ab4 3594 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3595 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3596
10ef9ab4
SP
3597 for_all_tx_queues(adapter, txo, i)
3598 be_cq_notify(adapter, txo->cq.id, true, 0);
3599
7a1e9b20
SP
3600 be_async_mcc_enable(adapter);
3601
10ef9ab4
SP
3602 for_all_evt_queues(adapter, eqo, i) {
3603 napi_enable(&eqo->napi);
6384a4d0 3604 be_enable_busy_poll(eqo);
20947770 3605 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3606 }
04d3d624 3607 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3608
323ff71e 3609 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3610 if (!status)
3611 be_link_status_update(adapter, link_status);
3612
fba87559 3613 netif_tx_start_all_queues(netdev);
c5abe7c0 3614#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3615 if (skyhawk_chip(adapter))
3616 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3617#endif
3618
889cd4b2
SP
3619 return 0;
3620err:
3621 be_close(adapter->netdev);
3622 return -EIO;
5fb379ee
SP
3623}
3624
71d8d1b5
AK
3625static int be_setup_wol(struct be_adapter *adapter, bool enable)
3626{
145155e7 3627 struct device *dev = &adapter->pdev->dev;
71d8d1b5 3628 struct be_dma_mem cmd;
71d8d1b5 3629 u8 mac[ETH_ALEN];
145155e7 3630 int status;
71d8d1b5 3631
c7bf7169 3632 eth_zero_addr(mac);
71d8d1b5
AK
3633
3634 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
145155e7 3635 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
ddf1169f 3636 if (!cmd.va)
6b568689 3637 return -ENOMEM;
71d8d1b5
AK
3638
3639 if (enable) {
3640 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3641 PCICFG_PM_CONTROL_OFFSET,
3642 PCICFG_PM_CONTROL_MASK);
71d8d1b5 3643 if (status) {
145155e7
KP
3644 dev_err(dev, "Could not enable Wake-on-lan\n");
3645 goto err;
71d8d1b5 3646 }
71d8d1b5 3647 } else {
145155e7 3648 ether_addr_copy(mac, adapter->netdev->dev_addr);
71d8d1b5
AK
3649 }
3650
145155e7
KP
3651 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3652 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3653 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3654err:
3655 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3656 return status;
3657}
3658
f7062ee5
SP
3659static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3660{
3661 u32 addr;
3662
3663 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3664
3665 mac[5] = (u8)(addr & 0xFF);
3666 mac[4] = (u8)((addr >> 8) & 0xFF);
3667 mac[3] = (u8)((addr >> 16) & 0xFF);
3668 /* Use the OUI from the current MAC address */
3669 memcpy(mac, adapter->netdev->dev_addr, 3);
3670}
3671
6d87f5c3
AK
3672/*
3673 * Generate a seed MAC address from the PF MAC Address using jhash.
3674 * MAC addresses for VFs are assigned incrementally starting from the seed.
3675 * These addresses are programmed in the ASIC by the PF and the VF driver
3676 * queries for the MAC address during its probe.
3677 */
4c876616 3678static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3679{
f9449ab7 3680 u32 vf;
3abcdeda 3681 int status = 0;
6d87f5c3 3682 u8 mac[ETH_ALEN];
11ac75ed 3683 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3684
3685 be_vf_eth_addr_generate(adapter, mac);
3686
11ac75ed 3687 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3688 if (BEx_chip(adapter))
590c391d 3689 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3690 vf_cfg->if_handle,
3691 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3692 else
3693 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3694 vf + 1);
590c391d 3695
6d87f5c3
AK
3696 if (status)
3697 dev_err(&adapter->pdev->dev,
748b539a
SP
3698 "Mac address assignment failed for VF %d\n",
3699 vf);
6d87f5c3 3700 else
11ac75ed 3701 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3702
3703 mac[5] += 1;
3704 }
3705 return status;
3706}
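/* Example (hash value hypothetical): if jhash() of the PF MAC returns
 * 0x00123456, the seed MAC keeps the PF's OUI in bytes 0-2 and ends in
 * 12:34:56; VF0 then gets ...:56, VF1 ...:57, VF2 ...:58, and so on via
 * the mac[5] increment in the loop above.
 */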
3707
4c876616
SP
3708static int be_vfs_mac_query(struct be_adapter *adapter)
3709{
3710 int status, vf;
3711 u8 mac[ETH_ALEN];
3712 struct be_vf_cfg *vf_cfg;
4c876616
SP
3713
3714 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3715 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3716 mac, vf_cfg->if_handle,
3717 false, vf+1);
4c876616
SP
3718 if (status)
3719 return status;
3720 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3721 }
3722 return 0;
3723}
3724
f9449ab7 3725static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3726{
11ac75ed 3727 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3728 u32 vf;
3729
257a3feb 3730 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3731 dev_warn(&adapter->pdev->dev,
3732 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3733 goto done;
3734 }
3735
b4c1df93
SP
3736 pci_disable_sriov(adapter->pdev);
3737
11ac75ed 3738 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3739 if (BEx_chip(adapter))
11ac75ed
SP
3740 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3741 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3742 else
3743 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3744 vf + 1);
f9449ab7 3745
11ac75ed
SP
3746 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3747 }
39f1d94d
SP
3748done:
3749 kfree(adapter->vf_cfg);
3750 adapter->num_vfs = 0;
f174c7ec 3751 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3752}
3753
7707133c
SP
3754static void be_clear_queues(struct be_adapter *adapter)
3755{
3756 be_mcc_queues_destroy(adapter);
3757 be_rx_cqs_destroy(adapter);
3758 be_tx_queues_destroy(adapter);
3759 be_evt_queues_destroy(adapter);
3760}
3761
68d7bdcb 3762static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3763{
191eb756
SP
3764 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->work);
3766 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3767 }
68d7bdcb
SP
3768}
3769
eb7dd46c
SP
3770static void be_cancel_err_detection(struct be_adapter *adapter)
3771{
3772 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3773 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3774 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3775 }
3776}
3777
c5abe7c0 3778#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3779static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3780{
630f4b70
SB
3781 struct net_device *netdev = adapter->netdev;
3782
c9c47142
SP
3783 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3784 be_cmd_manage_iface(adapter, adapter->if_handle,
3785 OP_CONVERT_TUNNEL_TO_NORMAL);
3786
3787 if (adapter->vxlan_port)
3788 be_cmd_set_vxlan_port(adapter, 0);
3789
3790 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3791 adapter->vxlan_port = 0;
630f4b70
SB
3792
3793 netdev->hw_enc_features = 0;
3794 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3795 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3796}
c5abe7c0 3797#endif
c9c47142 3798
f2858738
VV
3799static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3800{
3801 struct be_resources res = adapter->pool_res;
3802 u16 num_vf_qs = 1;
3803
ee9ad280 3804 /* Distribute the queue resources among the PF and its VFs.
f2858738
VV
3805 * Do not distribute queue resources in multi-channel configuration.
3806 */
3807 if (num_vfs && !be_is_mc(adapter)) {
ee9ad280
SB
3808 /* Divide the qpairs evenly among the VFs and the PF, capped
3809 * at VF-EQ-count. Any remainder qpairs belong to the PF.
3810 */
3811 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3812 res.max_rss_qs / (num_vfs + 1));
f2858738
VV
3813
3814 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3815 * interfaces per port. Provide RSS on VFs, only if number
3816 * of VFs requested is less than MAX_RSS_IFACES limit.
3817 */
3818 if (num_vfs >= MAX_RSS_IFACES)
3819 num_vf_qs = 1;
3820 }
3821 return num_vf_qs;
3822}
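/* Worked example (limits hypothetical): with res.max_rss_qs == 32 and
 * num_vfs == 7, each of the 8 functions (PF + 7 VFs) is offered
 * min(SH_VF_MAX_NIC_EQS, 32 / 8) queue pairs, with any remainder staying
 * with the PF; once num_vfs >= MAX_RSS_IFACES the VFs fall back to a
 * single queue pair each.
 */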
3823
b05004ad
SK
3824static int be_clear(struct be_adapter *adapter)
3825{
f2858738
VV
3826 struct pci_dev *pdev = adapter->pdev;
3827 u16 num_vf_qs;
3828
68d7bdcb 3829 be_cancel_worker(adapter);
191eb756 3830
11ac75ed 3831 if (sriov_enabled(adapter))
f9449ab7
SP
3832 be_vf_clear(adapter);
3833
bec84e6b
VV
3834 /* Re-configure FW to distribute resources evenly across max-supported
3835 * number of VFs, only when VFs are not already enabled.
3836 */
ace40aff
VV
3837 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3838 !pci_vfs_assigned(pdev)) {
f2858738
VV
3839 num_vf_qs = be_calculate_vf_qs(adapter,
3840 pci_sriov_get_totalvfs(pdev));
bec84e6b 3841 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3842 pci_sriov_get_totalvfs(pdev),
3843 num_vf_qs);
3844 }
bec84e6b 3845
c5abe7c0 3846#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3847 be_disable_vxlan_offloads(adapter);
c5abe7c0 3848#endif
bcc84140
KA
3849 kfree(adapter->pmac_id);
3850 adapter->pmac_id = NULL;
fbc13f01 3851
f9449ab7 3852 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3853
7707133c 3854 be_clear_queues(adapter);
a54769f5 3855
10ef9ab4 3856 be_msix_disable(adapter);
e1ad8e33 3857 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3858 return 0;
3859}
3860
4c876616 3861static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3862{
92bf14ab 3863 struct be_resources res = {0};
bcc84140 3864 u32 cap_flags, en_flags, vf;
4c876616 3865 struct be_vf_cfg *vf_cfg;
0700d816 3866 int status;
abb93951 3867
0700d816 3868 /* If a FW profile exists, then cap_flags are updated */
4c876616 3869 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3870 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3871
4c876616 3872 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3873 if (!BE3_chip(adapter)) {
3874 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3875 RESOURCE_LIMITS,
92bf14ab 3876 vf + 1);
435452aa 3877 if (!status) {
92bf14ab 3878 cap_flags = res.if_cap_flags;
435452aa
VV
3879 /* Prevent VFs from enabling VLAN promiscuous
3880 * mode
3881 */
3882 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3883 }
92bf14ab 3884 }
4c876616 3885
bcc84140
KA
3886 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3887 BE_IF_FLAGS_BROADCAST |
3888 BE_IF_FLAGS_MULTICAST |
3889 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3890 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3891 &vf_cfg->if_handle, vf + 1);
4c876616 3892 if (status)
0700d816 3893 return status;
4c876616 3894 }
0700d816
KA
3895
3896 return 0;
abb93951
PR
3897}
3898
39f1d94d 3899static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3900{
11ac75ed 3901 struct be_vf_cfg *vf_cfg;
30128031
SP
3902 int vf;
3903
39f1d94d
SP
3904 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3905 GFP_KERNEL);
3906 if (!adapter->vf_cfg)
3907 return -ENOMEM;
3908
11ac75ed
SP
3909 for_all_vfs(adapter, vf_cfg, vf) {
3910 vf_cfg->if_handle = -1;
3911 vf_cfg->pmac_id = -1;
30128031 3912 }
39f1d94d 3913 return 0;
30128031
SP
3914}
3915
f9449ab7
SP
3916static int be_vf_setup(struct be_adapter *adapter)
3917{
c502224e 3918 struct device *dev = &adapter->pdev->dev;
11ac75ed 3919 struct be_vf_cfg *vf_cfg;
4c876616 3920 int status, old_vfs, vf;
e7bcbd7b 3921 bool spoofchk;
39f1d94d 3922
257a3feb 3923 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3924
3925 status = be_vf_setup_init(adapter);
3926 if (status)
3927 goto err;
30128031 3928
4c876616
SP
3929 if (old_vfs) {
3930 for_all_vfs(adapter, vf_cfg, vf) {
3931 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3932 if (status)
3933 goto err;
3934 }
f9449ab7 3935
4c876616
SP
3936 status = be_vfs_mac_query(adapter);
3937 if (status)
3938 goto err;
3939 } else {
bec84e6b
VV
3940 status = be_vfs_if_create(adapter);
3941 if (status)
3942 goto err;
3943
39f1d94d
SP
3944 status = be_vf_eth_addr_config(adapter);
3945 if (status)
3946 goto err;
3947 }
f9449ab7 3948
11ac75ed 3949 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3950 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3951 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3952 vf + 1);
3953 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3954 status = be_cmd_set_fn_privileges(adapter,
435452aa 3955 vf_cfg->privileges |
04a06028
SP
3956 BE_PRIV_FILTMGMT,
3957 vf + 1);
435452aa
VV
3958 if (!status) {
3959 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3960 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3961 vf);
435452aa 3962 }
04a06028
SP
3963 }
3964
0f77ba73
RN
3965 /* Allow full available bandwidth */
3966 if (!old_vfs)
3967 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3968
e7bcbd7b
KA
3969 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3970 vf_cfg->if_handle, NULL,
3971 &spoofchk);
3972 if (!status)
3973 vf_cfg->spoofchk = spoofchk;
3974
bdce2ad7 3975 if (!old_vfs) {
0599863d 3976 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3977 be_cmd_set_logical_link_config(adapter,
3978 IFLA_VF_LINK_STATE_AUTO,
3979 vf+1);
3980 }
f9449ab7 3981 }
b4c1df93
SP
3982
3983 if (!old_vfs) {
3984 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3985 if (status) {
3986 dev_err(dev, "SRIOV enable failed\n");
3987 adapter->num_vfs = 0;
3988 goto err;
3989 }
3990 }
f174c7ec
VV
3991
3992 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3993 return 0;
3994err:
4c876616
SP
3995 dev_err(dev, "VF setup failed\n");
3996 be_vf_clear(adapter);
f9449ab7
SP
3997 return status;
3998}
3999
f93f160b
VV
4000/* Converting function_mode bits on BE3 to SH mc_type enums */
4001
4002static u8 be_convert_mc_type(u32 function_mode)
4003{
66064dbc 4004 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4005 return vNIC1;
66064dbc 4006 else if (function_mode & QNQ_MODE)
f93f160b
VV
4007 return FLEX10;
4008 else if (function_mode & VNIC_MODE)
4009 return vNIC2;
4010 else if (function_mode & UMC_ENABLED)
4011 return UMC;
4012 else
4013 return MC_NONE;
4014}
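/* Summary of the mapping above: VNIC_MODE + QNQ_MODE -> vNIC1, QNQ_MODE
 * alone -> FLEX10, VNIC_MODE alone -> vNIC2, UMC_ENABLED -> UMC, none
 * set -> MC_NONE. The ordering matters: the combined VNIC+QNQ test must
 * run before the plain QNQ test.
 */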
4015
92bf14ab
SP
4016 /* On BE2/BE3, the FW does not report the supported resource limits */
4017static void BEx_get_resources(struct be_adapter *adapter,
4018 struct be_resources *res)
4019{
bec84e6b 4020 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4021
4022 if (be_physfn(adapter))
4023 res->max_uc_mac = BE_UC_PMAC_COUNT;
4024 else
4025 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4026
f93f160b
VV
4027 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4028
4029 if (be_is_mc(adapter)) {
4030 /* Assuming that there are 4 channels per port,
4031 * when multi-channel is enabled
4032 */
4033 if (be_is_qnq_mode(adapter))
4034 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4035 else
4036 /* In a non-qnq multichannel mode, the pvid
4037 * takes up one vlan entry
4038 */
4039 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4040 } else {
92bf14ab 4041 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4042 }
4043
92bf14ab
SP
4044 res->max_mcast_mac = BE_MAX_MC;
4045
a5243dab
VV
4046 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4047 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4048 * *only* if it is RSS-capable.
4049 */
4050 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4051 be_virtfn(adapter) ||
4052 (be_is_mc(adapter) &&
4053 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4054 res->max_tx_qs = 1;
a28277dc
SR
4055 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4056 struct be_resources super_nic_res = {0};
4057
4058 /* On a SuperNIC profile, the driver needs to use the
4059 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4060 */
f2858738
VV
4061 be_cmd_get_profile_config(adapter, &super_nic_res,
4062 RESOURCE_LIMITS, 0);
a28277dc
SR
4063 /* Some old versions of BE3 FW don't report max_tx_qs value */
4064 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4065 } else {
92bf14ab 4066 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4067 }
92bf14ab
SP
4068
4069 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4070 !use_sriov && be_physfn(adapter))
4071 res->max_rss_qs = (adapter->be3_native) ?
4072 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4073 res->max_rx_qs = res->max_rss_qs + 1;
4074
e3dc867c 4075 if (be_physfn(adapter))
d3518e21 4076 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4077 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4078 else
4079 res->max_evt_qs = 1;
92bf14ab
SP
4080
4081 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4082 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4083 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4084 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4085}
4086
30128031
SP
4087static void be_setup_init(struct be_adapter *adapter)
4088{
4089 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4090 adapter->phy.link_speed = -1;
30128031
SP
4091 adapter->if_handle = -1;
4092 adapter->be3_native = false;
f66b7cfd 4093 adapter->if_flags = 0;
51d1f98a 4094 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4095 if (be_physfn(adapter))
4096 adapter->cmd_privileges = MAX_PRIVILEGES;
4097 else
4098 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4099}
4100
bec84e6b
VV
4101static int be_get_sriov_config(struct be_adapter *adapter)
4102{
bec84e6b 4103 struct be_resources res = {0};
d3d18312 4104 int max_vfs, old_vfs;
bec84e6b 4105
f2858738 4106 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4107
ace40aff 4108 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4109 if (BE3_chip(adapter) && !res.max_vfs) {
4110 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4111 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4112 }
4113
d3d18312 4114 adapter->pool_res = res;
bec84e6b 4115
ace40aff
VV
4116 /* If during previous unload of the driver, the VFs were not disabled,
4117 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4118 * Instead use the TotalVFs value stored in the pci-dev struct.
4119 */
bec84e6b
VV
4120 old_vfs = pci_num_vf(adapter->pdev);
4121 if (old_vfs) {
ace40aff
VV
4122 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4123 old_vfs);
4124
4125 adapter->pool_res.max_vfs =
4126 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4127 adapter->num_vfs = old_vfs;
bec84e6b
VV
4128 }
4129
4130 return 0;
4131}
4132
ace40aff
VV
4133static void be_alloc_sriov_res(struct be_adapter *adapter)
4134{
4135 int old_vfs = pci_num_vf(adapter->pdev);
4136 u16 num_vf_qs;
4137 int status;
4138
4139 be_get_sriov_config(adapter);
4140
4141 if (!old_vfs)
4142 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4143
4144 /* When the HW is in SRIOV capable configuration, the PF-pool
4145 * resources are given to PF during driver load, if there are no
4146 * old VFs. This facility is not available in BE3 FW.
4147 * Also, this is done by FW in Lancer chip.
4148 */
4149 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4150 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4151 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4152 num_vf_qs);
4153 if (status)
4154 dev_err(&adapter->pdev->dev,
4155 "Failed to optimize SRIOV resources\n");
4156 }
4157}
4158
92bf14ab 4159static int be_get_resources(struct be_adapter *adapter)
abb93951 4160{
92bf14ab
SP
4161 struct device *dev = &adapter->pdev->dev;
4162 struct be_resources res = {0};
4163 int status;
abb93951 4164
92bf14ab
SP
4165 if (BEx_chip(adapter)) {
4166 BEx_get_resources(adapter, &res);
4167 adapter->res = res;
abb93951
PR
4168 }
4169
92bf14ab
SP
4170 /* For Lancer, SH etc read per-function resource limits from FW.
4171 * GET_FUNC_CONFIG returns per function guaranteed limits.
4173 * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
4173 */
4174 if (!BEx_chip(adapter)) {
4175 status = be_cmd_get_func_config(adapter, &res);
4176 if (status)
4177 return status;
abb93951 4178
71bb8bd0
VV
4179 /* If a default RXQ must be created, we'll use up one RSSQ */
4180 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4181 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4182 res.max_rss_qs -= 1;
4183
92bf14ab
SP
4184 /* If RoCE may be enabled stash away half the EQs for RoCE */
4185 if (be_roce_supported(adapter))
4186 res.max_evt_qs /= 2;
4187 adapter->res = res;
abb93951 4188 }
4c876616 4189
71bb8bd0
VV
4190 /* If FW supports RSS default queue, then skip creating non-RSS
4191 * queue for non-IP traffic.
4192 */
4193 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4194 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4195
acbafeb1
SP
4196 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4197 be_max_txqs(adapter), be_max_rxqs(adapter),
4198 be_max_rss(adapter), be_max_eqs(adapter),
4199 be_max_vfs(adapter));
4200 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4201 be_max_uc(adapter), be_max_mc(adapter),
4202 be_max_vlans(adapter));
4203
ace40aff
VV
4204 /* Sanitize cfg_num_qs based on HW and platform limits */
4205 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4206 be_max_qs(adapter));
92bf14ab 4207 return 0;
abb93951
PR
4208}
4209
39f1d94d
SP
4210static int be_get_config(struct be_adapter *adapter)
4211{
6b085ba9 4212 int status, level;
542963b7 4213 u16 profile_id;
6b085ba9 4214
980df249
SR
4215 status = be_cmd_get_cntl_attributes(adapter);
4216 if (status)
4217 return status;
4218
e97e3cda 4219 status = be_cmd_query_fw_cfg(adapter);
abb93951 4220 if (status)
92bf14ab 4221 return status;
abb93951 4222
fd7ff6f0
VD
4223 if (!lancer_chip(adapter) && be_physfn(adapter))
4224 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4225
6b085ba9
SP
4226 if (BEx_chip(adapter)) {
4227 level = be_cmd_get_fw_log_level(adapter);
4228 adapter->msg_enable =
4229 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4230 }
4231
4232 be_cmd_get_acpi_wol_cap(adapter);
4233
21252377
VV
4234 be_cmd_query_port_name(adapter);
4235
4236 if (be_physfn(adapter)) {
542963b7
VV
4237 status = be_cmd_get_active_profile(adapter, &profile_id);
4238 if (!status)
4239 dev_info(&adapter->pdev->dev,
4240 "Using profile 0x%x\n", profile_id);
962bcb75 4241 }
bec84e6b 4242
92bf14ab
SP
4243 status = be_get_resources(adapter);
4244 if (status)
4245 return status;
abb93951 4246
46ee9c14
RN
4247 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4248 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4249 if (!adapter->pmac_id)
4250 return -ENOMEM;
abb93951 4251
92bf14ab 4252 return 0;
39f1d94d
SP
4253}
4254
95046b92
SP
4255static int be_mac_setup(struct be_adapter *adapter)
4256{
4257 u8 mac[ETH_ALEN];
4258 int status;
4259
4260 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4261 status = be_cmd_get_perm_mac(adapter, mac);
4262 if (status)
4263 return status;
4264
4265 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4266 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4267 }
4268
95046b92
SP
4269 return 0;
4270}
4271
68d7bdcb
SP
4272static void be_schedule_worker(struct be_adapter *adapter)
4273{
4274 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4275 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4276}
4277
972f37b4 4278static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c
SP
4279{
4280 schedule_delayed_work(&adapter->be_err_detection_work,
972f37b4 4281 msecs_to_jiffies(delay));
eb7dd46c
SP
4282 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4283}
4284
7707133c 4285static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4286{
68d7bdcb 4287 struct net_device *netdev = adapter->netdev;
10ef9ab4 4288 int status;
ba343c77 4289
7707133c 4290 status = be_evt_queues_create(adapter);
abb93951
PR
4291 if (status)
4292 goto err;
73d540f2 4293
7707133c 4294 status = be_tx_qs_create(adapter);
c2bba3df
SK
4295 if (status)
4296 goto err;
10ef9ab4 4297
7707133c 4298 status = be_rx_cqs_create(adapter);
10ef9ab4 4299 if (status)
a54769f5 4300 goto err;
6b7c5b94 4301
7707133c 4302 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4303 if (status)
4304 goto err;
4305
68d7bdcb
SP
4306 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4307 if (status)
4308 goto err;
4309
4310 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4311 if (status)
4312 goto err;
4313
7707133c
SP
4314 return 0;
4315err:
4316 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4317 return status;
4318}
4319
62219066
AK
4320static int be_if_create(struct be_adapter *adapter)
4321{
4322 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4323 u32 cap_flags = be_if_cap_flags(adapter);
4324 int status;
4325
4326 if (adapter->cfg_num_qs == 1)
4327 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4328
4329 en_flags &= cap_flags;
4330 /* will enable all the needed filter flags in be_open() */
4331 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4332 &adapter->if_handle, 0);
4333
4334 return status;
4335}
4336
68d7bdcb
SP
4337int be_update_queues(struct be_adapter *adapter)
4338{
4339 struct net_device *netdev = adapter->netdev;
4340 int status;
4341
4342 if (netif_running(netdev))
4343 be_close(netdev);
4344
4345 be_cancel_worker(adapter);
4346
4347 /* If any vectors have been shared with RoCE we cannot re-program
4348 * the MSIx table.
4349 */
4350 if (!adapter->num_msix_roce_vec)
4351 be_msix_disable(adapter);
4352
4353 be_clear_queues(adapter);
62219066
AK
4354 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4355 if (status)
4356 return status;
68d7bdcb
SP
4357
4358 if (!msix_enabled(adapter)) {
4359 status = be_msix_enable(adapter);
4360 if (status)
4361 return status;
4362 }
4363
62219066
AK
4364 status = be_if_create(adapter);
4365 if (status)
4366 return status;
4367
68d7bdcb
SP
4368 status = be_setup_queues(adapter);
4369 if (status)
4370 return status;
4371
4372 be_schedule_worker(adapter);
4373
4374 if (netif_running(netdev))
4375 status = be_open(netdev);
4376
4377 return status;
4378}
4379
f7062ee5
SP
4380static inline int fw_major_num(const char *fw_ver)
4381{
4382 int fw_major = 0, i;
4383
4384 i = sscanf(fw_ver, "%d.", &fw_major);
4385 if (i != 1)
4386 return 0;
4387
4388 return fw_major;
4389}
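/* Example (version string hypothetical): for fw_ver "4.6.62.0",
 * sscanf(fw_ver, "%d.", ...) converts one field and returns 1, so
 * fw_major_num() yields 4; a string with no leading integer yields 0.
 */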
4390
f962f840
SP
4391 /* If any VFs are already enabled, don't FLR the PF */
4392static bool be_reset_required(struct be_adapter *adapter)
4393{
4394 return pci_num_vf(adapter->pdev) ? false : true;
4395}
4396
4397/* Wait for the FW to be ready and perform the required initialization */
4398static int be_func_init(struct be_adapter *adapter)
4399{
4400 int status;
4401
4402 status = be_fw_wait_ready(adapter);
4403 if (status)
4404 return status;
4405
4406 if (be_reset_required(adapter)) {
4407 status = be_cmd_reset_function(adapter);
4408 if (status)
4409 return status;
4410
4411 /* Wait for interrupts to quiesce after an FLR */
4412 msleep(100);
4413
4414 /* We can clear all errors when function reset succeeds */
954f6825 4415 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4416 }
4417
4418 /* Tell FW we're ready to fire cmds */
4419 status = be_cmd_fw_init(adapter);
4420 if (status)
4421 return status;
4422
4423 /* Allow interrupts for other ULPs running on NIC function */
4424 be_intr_set(adapter, true);
4425
4426 return 0;
4427}
4428
7707133c
SP
4429static int be_setup(struct be_adapter *adapter)
4430{
4431 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4432 int status;
4433
f962f840
SP
4434 status = be_func_init(adapter);
4435 if (status)
4436 return status;
4437
7707133c
SP
4438 be_setup_init(adapter);
4439
4440 if (!lancer_chip(adapter))
4441 be_cmd_req_native_mode(adapter);
4442
980df249
SR
4443 /* invoke this cmd first to get pf_num and vf_num which are needed
4444 * for issuing profile related cmds
4445 */
4446 if (!BEx_chip(adapter)) {
4447 status = be_cmd_get_func_config(adapter, NULL);
4448 if (status)
4449 return status;
4450 }
72ef3a88 4451
ace40aff
VV
4452 if (!BE2_chip(adapter) && be_physfn(adapter))
4453 be_alloc_sriov_res(adapter);
4454
7707133c 4455 status = be_get_config(adapter);
10ef9ab4 4456 if (status)
a54769f5 4457 goto err;
6b7c5b94 4458
7707133c 4459 status = be_msix_enable(adapter);
10ef9ab4 4460 if (status)
a54769f5 4461 goto err;
6b7c5b94 4462
bcc84140 4463 /* will enable all the needed filter flags in be_open() */
62219066 4464 status = be_if_create(adapter);
7707133c 4465 if (status)
a54769f5 4466 goto err;
6b7c5b94 4467
68d7bdcb
SP
4468 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4469 rtnl_lock();
7707133c 4470 status = be_setup_queues(adapter);
68d7bdcb 4471 rtnl_unlock();
95046b92 4472 if (status)
1578e777
PR
4473 goto err;
4474
7707133c 4475 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4476
4477 status = be_mac_setup(adapter);
10ef9ab4
SP
4478 if (status)
4479 goto err;
4480
e97e3cda 4481 be_cmd_get_fw_ver(adapter);
acbafeb1 4482 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4483
e9e2a904 4484 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4485 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4486 adapter->fw_ver);
4487 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4488 }
4489
00d594c3
KA
4490 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4491 adapter->rx_fc);
4492 if (status)
4493 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4494 &adapter->rx_fc);
590c391d 4495
00d594c3
KA
4496 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4497 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4498
bdce2ad7
SR
4499 if (be_physfn(adapter))
4500 be_cmd_set_logical_link_config(adapter,
4501 IFLA_VF_LINK_STATE_AUTO, 0);
4502
bec84e6b
VV
4503 if (adapter->num_vfs)
4504 be_vf_setup(adapter);
f9449ab7 4505
f25b119c
PR
4506 status = be_cmd_get_phy_info(adapter);
4507 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4508 adapter->phy.fc_autoneg = 1;
4509
68d7bdcb 4510 be_schedule_worker(adapter);
e1ad8e33 4511 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4512 return 0;
a54769f5
SP
4513err:
4514 be_clear(adapter);
4515 return status;
4516}
6b7c5b94 4517
66268739
IV
4518#ifdef CONFIG_NET_POLL_CONTROLLER
4519static void be_netpoll(struct net_device *netdev)
4520{
4521 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4522 struct be_eq_obj *eqo;
66268739
IV
4523 int i;
4524
e49cc34f 4525 for_all_evt_queues(adapter, eqo, i) {
20947770 4526 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4527 napi_schedule(&eqo->napi);
4528 }
66268739
IV
4529}
4530#endif
4531
485bf569
SN
4532int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4533{
4534 const struct firmware *fw;
4535 int status;
4536
4537 if (!netif_running(adapter->netdev)) {
4538 dev_err(&adapter->pdev->dev,
4539 "Firmware load not allowed (interface is down)\n");
940a3fcd 4540 return -ENETDOWN;
485bf569
SN
4541 }
4542
4543 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4544 if (status)
4545 goto fw_exit;
4546
4547 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4548
4549 if (lancer_chip(adapter))
4550 status = lancer_fw_download(adapter, fw);
4551 else
4552 status = be_fw_download(adapter, fw);
4553
eeb65ced 4554 if (!status)
e97e3cda 4555 be_cmd_get_fw_ver(adapter);
eeb65ced 4556
84517482
AK
4557fw_exit:
4558 release_firmware(fw);
4559 return status;
4560}
4561
add511b3
RP
4562static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4563 u16 flags)
a77dcb8c
AK
4564{
4565 struct be_adapter *adapter = netdev_priv(dev);
4566 struct nlattr *attr, *br_spec;
4567 int rem;
4568 int status = 0;
4569 u16 mode = 0;
4570
4571 if (!sriov_enabled(adapter))
4572 return -EOPNOTSUPP;
4573
4574 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4575 if (!br_spec)
4576 return -EINVAL;
a77dcb8c
AK
4577
4578 nla_for_each_nested(attr, br_spec, rem) {
4579 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4580 continue;
4581
b7c1a314
TG
4582 if (nla_len(attr) < sizeof(mode))
4583 return -EINVAL;
4584
a77dcb8c 4585 mode = nla_get_u16(attr);
ac0f5fba
SR
4586 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4587 return -EOPNOTSUPP;
4588
a77dcb8c
AK
4589 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4590 return -EINVAL;
4591
4592 status = be_cmd_set_hsw_config(adapter, 0, 0,
4593 adapter->if_handle,
4594 mode == BRIDGE_MODE_VEPA ?
4595 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4596 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4597 if (status)
4598 goto err;
4599
4600 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4601 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4602
4603 return status;
4604 }
4605err:
4606 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4607 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4608
4609 return status;
4610}
4611
4612static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4613 struct net_device *dev, u32 filter_mask,
4614 int nlflags)
a77dcb8c
AK
4615{
4616 struct be_adapter *adapter = netdev_priv(dev);
4617 int status = 0;
4618 u8 hsw_mode;
4619
a77dcb8c
AK
4620 /* BE and Lancer chips support VEB mode only */
4621 if (BEx_chip(adapter) || lancer_chip(adapter)) {
8431706b
IV
4622 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4623 if (!pci_sriov_get_totalvfs(adapter->pdev))
4624 return 0;
a77dcb8c
AK
4625 hsw_mode = PORT_FWD_TYPE_VEB;
4626 } else {
4627 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4628 adapter->if_handle, &hsw_mode,
4629 NULL);
a77dcb8c
AK
4630 if (status)
4631 return 0;
ff9ed19d
KP
4632
4633 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4634 return 0;
a77dcb8c
AK
4635 }
4636
4637 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4638 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4639 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4640 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4641}
4642
c5abe7c0 4643#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4644/* VxLAN offload Notes:
4645 *
4646 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4647 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4648 * is expected to work across all types of IP tunnels once exported. Skyhawk
4649 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4650 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4651 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4652 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4653 *
4654 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4655 * adds more than one port, disable offloads and don't re-enable them again
4656 * until after all the tunnels are removed.
4657 */
c9c47142
SP
4658static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4659 __be16 port)
4660{
4661 struct be_adapter *adapter = netdev_priv(netdev);
4662 struct device *dev = &adapter->pdev->dev;
4663 int status;
4664
af19e686 4665 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4666 return;
4667
1e5b311a
JB
4668 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4669 adapter->vxlan_port_aliases++;
4670 return;
4671 }
4672
c9c47142 4673 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4674 dev_info(dev,
4675 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4676 dev_info(dev, "Disabling VxLAN offloads\n");
4677 adapter->vxlan_port_count++;
4678 goto err;
c9c47142
SP
4679 }
4680
630f4b70
SB
4681 if (adapter->vxlan_port_count++ >= 1)
4682 return;
4683
c9c47142
SP
4684 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4685 OP_CONVERT_NORMAL_TO_TUNNEL);
4686 if (status) {
4687 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4688 goto err;
4689 }
4690
4691 status = be_cmd_set_vxlan_port(adapter, port);
4692 if (status) {
4693 dev_warn(dev, "Failed to add VxLAN port\n");
4694 goto err;
4695 }
4696 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4697 adapter->vxlan_port = port;
4698
630f4b70
SB
4699 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4700 NETIF_F_TSO | NETIF_F_TSO6 |
4701 NETIF_F_GSO_UDP_TUNNEL;
4702 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4703 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4704
c9c47142
SP
4705 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4706 be16_to_cpu(port));
4707 return;
4708err:
4709 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4710}
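/* Usage sketch (port number hypothetical): a first add for UDP port 4789
 * converts the interface to tunnel mode and programs the port; a second
 * add for the same port only bumps vxlan_port_aliases; an add for a
 * *different* port while offloads are enabled disables offloads entirely
 * until every port has been removed again.
 */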
4711
4712static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4713 __be16 port)
4714{
4715 struct be_adapter *adapter = netdev_priv(netdev);
4716
af19e686 4717 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4718 return;
4719
4720 if (adapter->vxlan_port != port)
630f4b70 4721 goto done;
c9c47142 4722
1e5b311a
JB
4723 if (adapter->vxlan_port_aliases) {
4724 adapter->vxlan_port_aliases--;
4725 return;
4726 }
4727
c9c47142
SP
4728 be_disable_vxlan_offloads(adapter);
4729
4730 dev_info(&adapter->pdev->dev,
4731 "Disabled VxLAN offloads for UDP port %d\n",
4732 be16_to_cpu(port));
630f4b70
SB
4733done:
4734 adapter->vxlan_port_count--;
c9c47142 4735}
725d548f 4736
5f35227e
JG
4737static netdev_features_t be_features_check(struct sk_buff *skb,
4738 struct net_device *dev,
4739 netdev_features_t features)
725d548f 4740{
16dde0d6
SB
4741 struct be_adapter *adapter = netdev_priv(dev);
4742 u8 l4_hdr = 0;
4743
4744 /* The code below restricts offload features for some tunneled packets.
4745 * Offload features for normal (non tunnel) packets are unchanged.
4746 */
4747 if (!skb->encapsulation ||
4748 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4749 return features;
4750
4751 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4752 * should disable tunnel offload features if it's not a VxLAN packet,
4753 * as tunnel offloads have been enabled only for VxLAN. This is done to
4754 * allow other tunneled traffic like GRE to work fine while VxLAN
4755 * offloads are configured in Skyhawk-R.
4756 */
4757 switch (vlan_get_protocol(skb)) {
4758 case htons(ETH_P_IP):
4759 l4_hdr = ip_hdr(skb)->protocol;
4760 break;
4761 case htons(ETH_P_IPV6):
4762 l4_hdr = ipv6_hdr(skb)->nexthdr;
4763 break;
4764 default:
4765 return features;
4766 }
4767
4768 if (l4_hdr != IPPROTO_UDP ||
4769 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4770 skb->inner_protocol != htons(ETH_P_TEB) ||
4771 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4772 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
a188222b 4773 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
16dde0d6
SB
4774
4775 return features;
725d548f 4776}
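/* The inner-header test above encodes the VxLAN framing arithmetic: the
 * gap between the transport header and the inner MAC header must equal
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr), i.e. 8 + 8 = 16 bytes;
 * anything else is treated as a non-VxLAN tunnel and loses the
 * checksum/GSO offload bits.
 */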
c5abe7c0 4777#endif
c9c47142 4778
a155a5db
SB
4779static int be_get_phys_port_id(struct net_device *dev,
4780 struct netdev_phys_item_id *ppid)
4781{
4782 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4783 struct be_adapter *adapter = netdev_priv(dev);
4784 u8 *id;
4785
4786 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4787 return -ENOSPC;
4788
4789 ppid->id[0] = adapter->hba_port_num + 1;
4790 id = &ppid->id[1];
4791 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4792 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4793 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4794
4795 ppid->id_len = id_len;
4796
4797 return 0;
4798}
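/* Layout sketch: ppid->id[0] carries hba_port_num + 1, and the remaining
 * id_len - 1 bytes hold the controller serial-number words copied in
 * reverse word order, filling CNTL_SERIAL_NUM_WORDS *
 * CNTL_SERIAL_NUM_WORD_SZ bytes.
 */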
4799
e5686ad8 4800static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4801 .ndo_open = be_open,
4802 .ndo_stop = be_close,
4803 .ndo_start_xmit = be_xmit,
a54769f5 4804 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4805 .ndo_set_mac_address = be_mac_addr_set,
4806 .ndo_change_mtu = be_change_mtu,
ab1594e9 4807 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4808 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4809 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4810 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4811 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4812 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4813 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4814 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4815 .ndo_set_vf_link_state = be_set_vf_link_state,
e7bcbd7b 4816 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
66268739
IV
4817#ifdef CONFIG_NET_POLL_CONTROLLER
4818 .ndo_poll_controller = be_netpoll,
4819#endif
a77dcb8c
AK
4820 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4821 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4822#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4823 .ndo_busy_poll = be_busy_poll,
6384a4d0 4824#endif
c5abe7c0 4825#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4826 .ndo_add_vxlan_port = be_add_vxlan_port,
4827 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4828 .ndo_features_check = be_features_check,
c5abe7c0 4829#endif
a155a5db 4830 .ndo_get_phys_port_id = be_get_phys_port_id,
6b7c5b94
SP
4831};
4832
4833static void be_netdev_init(struct net_device *netdev)
4834{
4835 struct be_adapter *adapter = netdev_priv(netdev);
4836
6332c8d3 4837 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4838 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4839 NETIF_F_HW_VLAN_CTAG_TX;
62219066 4840 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
8b8ddc68 4841 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4842
4843 netdev->features |= netdev->hw_features |
f646968f 4844 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4845
eb8a50d9 4846 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4847 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4848
fbc13f01
AK
4849 netdev->priv_flags |= IFF_UNICAST_FLT;
4850
6b7c5b94
SP
4851 netdev->flags |= IFF_MULTICAST;
4852
b7e5887e 4853 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4854
10ef9ab4 4855 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4856
7ad24ea4 4857 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4858}
4859
87ac1a52
KA
4860static void be_cleanup(struct be_adapter *adapter)
4861{
4862 struct net_device *netdev = adapter->netdev;
4863
4864 rtnl_lock();
4865 netif_device_detach(netdev);
4866 if (netif_running(netdev))
4867 be_close(netdev);
4868 rtnl_unlock();
4869
4870 be_clear(adapter);
4871}
4872
484d76fd 4873static int be_resume(struct be_adapter *adapter)
78fad34e 4874{
d0e1b319 4875 struct net_device *netdev = adapter->netdev;
78fad34e
SP
4876 int status;
4877
78fad34e
SP
4878 status = be_setup(adapter);
4879 if (status)
484d76fd 4880 return status;
78fad34e 4881
d0e1b319
KA
4882 if (netif_running(netdev)) {
4883 status = be_open(netdev);
78fad34e 4884 if (status)
484d76fd 4885 return status;
78fad34e
SP
4886 }
4887
d0e1b319
KA
4888 netif_device_attach(netdev);
4889
484d76fd
KA
4890 return 0;
4891}
4892
4893static int be_err_recover(struct be_adapter *adapter)
4894{
484d76fd
KA
4895 int status;
4896
1babbad4
PR
4897 /* Error recovery is supported only on Lancer as of now */
4898 if (!lancer_chip(adapter))
4899 return -EIO;
4900
4901 /* Wait for adapter to reach quiescent state before
4902 * destroying queues
4903 */
4904 status = be_fw_wait_ready(adapter);
4905 if (status)
4906 goto err;
4907
4908 be_cleanup(adapter);
4909
484d76fd
KA
4910 status = be_resume(adapter);
4911 if (status)
4912 goto err;
4913
78fad34e
SP
4914 return 0;
4915err:
78fad34e
SP
4916 return status;
4917}
4918
eb7dd46c 4919static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
4920{
4921 struct be_adapter *adapter =
eb7dd46c
SP
4922 container_of(work, struct be_adapter,
4923 be_err_detection_work.work);
1babbad4
PR
4924 struct device *dev = &adapter->pdev->dev;
4925 int recovery_status;
972f37b4 4926 int delay = ERR_DETECTION_DELAY;
78fad34e
SP
4927
4928 be_detect_error(adapter);
4929
1babbad4
PR
4930 if (be_check_error(adapter, BE_ERROR_HW))
4931 recovery_status = be_err_recover(adapter);
4932 else
4933 goto reschedule_task;
4934
4935 if (!recovery_status) {
972f37b4 4936 adapter->recovery_retries = 0;
1babbad4
PR
4937 dev_info(dev, "Adapter recovery successful\n");
4938 goto reschedule_task;
4939 } else if (be_virtfn(adapter)) {
4940 /* For VFs, check every second whether the PF has
4941 * allocated resources.
4942 */
4943 dev_err(dev, "Re-trying adapter recovery\n");
4944 goto reschedule_task;
972f37b4
PR
4945 } else if (adapter->recovery_retries++ <
4946 MAX_ERR_RECOVERY_RETRY_COUNT) {
4947 /* In case of another error during recovery, it takes 30 sec
4948 * for adapter to come out of error. Retry error recovery after
4949 * this time interval.
4950 */
4951 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
4952 delay = ERR_RECOVERY_RETRY_DELAY;
4953 goto reschedule_task;
1babbad4
PR
4954 } else {
4955 dev_err(dev, "Adapter recovery failed\n");
78fad34e
SP
4956 }
4957
1babbad4
PR
4958 return;
4959reschedule_task:
972f37b4 4960 be_schedule_err_detection(adapter, delay);
78fad34e
SP
4961}
4962
4963static void be_log_sfp_info(struct be_adapter *adapter)
4964{
4965 int status;
4966
4967 status = be_cmd_query_sfp_info(adapter);
4968 if (!status) {
4969 dev_err(&adapter->pdev->dev,
51d1f98a
AK
4970 "Port %c: %s Vendor: %s part no: %s",
4971 adapter->port_name,
4972 be_misconfig_evt_port_state[adapter->phy_state],
4973 adapter->phy.vendor_name,
78fad34e
SP
4974 adapter->phy.vendor_pn);
4975 }
51d1f98a 4976 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
78fad34e
SP
4977}
4978
4979static void be_worker(struct work_struct *work)
4980{
4981 struct be_adapter *adapter =
4982 container_of(work, struct be_adapter, work.work);
4983 struct be_rx_obj *rxo;
4984 int i;
4985
4986 /* when interrupts are not yet enabled, just reap any pending
4987 * mcc completions
4988 */
4989 if (!netif_running(adapter->netdev)) {
4990 local_bh_disable();
4991 be_process_mcc(adapter);
4992 local_bh_enable();
4993 goto reschedule;
4994 }
4995
4996 if (!adapter->stats_cmd_sent) {
4997 if (lancer_chip(adapter))
4998 lancer_cmd_get_pport_stats(adapter,
4999 &adapter->stats_cmd);
5000 else
5001 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5002 }
5003
5004 if (be_physfn(adapter) &&
5005 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5006 be_cmd_get_die_temperature(adapter);
5007
5008 for_all_rx_queues(adapter, rxo, i) {
5009 /* Replenish RX-queues starved due to memory
5010 * allocation failures.
5011 */
5012 if (rxo->rx_post_starved)
5013 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5014 }
5015
20947770
PR
5016 /* EQ-delay update for Skyhawk is done while notifying EQ */
5017 if (!skyhawk_chip(adapter))
5018 be_eqd_update(adapter, false);
78fad34e 5019
51d1f98a 5020 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
78fad34e
SP
5021 be_log_sfp_info(adapter);
5022
5023reschedule:
5024 adapter->work_counter++;
5025 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5026}
5027
6b7c5b94
SP
5028static void be_unmap_pci_bars(struct be_adapter *adapter)
5029{
c5b3ad4c
SP
5030 if (adapter->csr)
5031 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 5032 if (adapter->db)
ce66f781 5033 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
5034}
5035
ce66f781
SP
5036static int db_bar(struct be_adapter *adapter)
5037{
18c57c74 5038 if (lancer_chip(adapter) || be_virtfn(adapter))
ce66f781
SP
5039 return 0;
5040 else
5041 return 4;
5042}
5043
5044static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 5045{
dbf0f2a7 5046 if (skyhawk_chip(adapter)) {
ce66f781
SP
5047 adapter->roce_db.size = 4096;
5048 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5049 db_bar(adapter));
5050 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5051 db_bar(adapter));
5052 }
045508a8 5053 return 0;
6b7c5b94
SP
5054}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

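	/* The mailbox buffer was over-allocated by 16 bytes above so that a
	 * 16-byte-aligned view can be carved out of it here; mbox_mem is
	 * the aligned window that the MCC mailbox commands actually use.
	 */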
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Tell FW we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
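
/* From user space this is read through the standard hwmon sysfs
 * interface, e.g. (the hwmon index is illustrative; it is assigned at
 * registration time):
 *
 *   $ cat /sys/class/hwmon/hwmon0/temp1_input
 *   55000        <- 55 degrees Celsius, reported in millidegrees
 */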

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	/* On-die temperature is not supported on VFs */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
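
/* adapter->wol_en mirrors the user's wake-on-LAN preference, normally
 * toggled from user space via ethtool, e.g. "ethtool -s eth0 wol g"
 * (interface name illustrative).
 */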

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/* An FLR will stop BE from DMAing any data. */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress can
	 * prevent it from recovering, so wait for the dump to finish.
	 * Waiting on the first function only is enough, as this is
	 * needed just once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are distributed equally across the maximum number of
	 * VFs. The user may request that only a subset of the max VFs be
	 * enabled; based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF gets a larger share.
	 * This facility is not available in BE3 FW.
	 * Also, on the Lancer chip this is done by the FW itself.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
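
/* This callback is wired up to the PCI core's standard SR-IOV sysfs
 * knob; enabling or disabling VFs from user space looks like (device
 * address illustrative):
 *
 *   # echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *   # echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 */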
5588
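/* Standard PCI error-recovery sequence: the core invokes
 * .error_detected first, .slot_reset after the link/slot reset, and
 * .resume once normal operation may restart.
 */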
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5606
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
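
/* Example module load with a non-default fragment size (must be one of
 * 2048/4096/8192; the module name follows DRV_NAME, typically be2net):
 *
 *   # modprobe be2net rx_frag_size=4096
 */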

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);