[linux-2.6-block.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

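/* Queue rings are carved out of DMA-coherent memory; the two helpers below
 * free and allocate the backing buffer for a be_queue_info.
 */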
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

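/* Doorbell helpers: each routine below composes a doorbell word and writes it
 * to the RQ/TXULP/EQ/CQ register in the adapter's doorbell BAR. The wmb()
 * before the RQ/TX doorbells orders ring-memory updates ahead of the notify.
 */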
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

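/* On BEx chips the erx drop counter is only 16 bits wide; the helper below
 * accumulates it into a 32-bit driver counter, adding 65536 on each wrap.
 */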
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

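/* ndo_get_stats64: per-queue SW counters are read under the u64_stats
 * seqcount retry loop; the error counters come from the parsed HW stats.
 */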
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

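/* Update the per-queue SW TX stats under the u64_stats seqcount */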
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

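/* Derive the WRB header feature flags (LSO, IP/TCP/UDP csum offload, VLAN)
 * from the skb's offload state.
 */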
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 *	ASIC lockup when the ASIC inserts VLAN tag into
	 *	certain ipv6 packets. Insert VLAN tags in driver,
	 *	and set event, completion, vlan bits accordingly
	 *	in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

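/* Returns true if a second copy of the packet must also be sent to the BMC,
 * based on the firmware-provided bmc_filt_mask filters defined above.
 */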
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (udp->dest) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a VLAN that are destined to the BMC, the ASIC
	 * expects the VLAN tag to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

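/* ndo_start_xmit: apply HW workarounds, map the skb into WRBs, optionally
 * enqueue a 2nd copy for the BMC, and ring the TX doorbell when flushing.
 */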
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

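/* ndo_set_rx_mode: sync the promiscuous, multicast and unicast filters with
 * the netdev flags and address lists.
 */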
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

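/* SR-IOV ndo callbacks below (set_vf_mac, get_vf_config, set_vf_vlan, ...)
 * are valid only when VFs have been enabled on the PF.
 */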
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

64600ea5 1599static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1600 struct ifla_vf_info *vi)
64600ea5
AK
1601{
1602 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1603 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1604
11ac75ed 1605 if (!sriov_enabled(adapter))
1606 return -EPERM;
1607
11ac75ed 1608 if (vf >= adapter->num_vfs)
1609 return -EINVAL;
1610
1611 vi->vf = vf;
1612 vi->max_tx_rate = vf_cfg->tx_rate;
1613 vi->min_tx_rate = 0;
1614 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1615 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1616 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1617 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1618 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1619
1620 return 0;
1621}
1622
1623static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1624{
1625 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1626 u16 vids[BE_NUM_VLANS_SUPPORTED];
1627 int vf_if_id = vf_cfg->if_handle;
1628 int status;
1629
1630 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1631 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1632 if (status)
1633 return status;
1634
1635 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1636 vids[0] = 0;
1637 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1638 if (!status)
1639 dev_info(&adapter->pdev->dev,
1640 "Cleared guest VLANs on VF%d", vf);
1641
1642 /* After TVT is enabled, disallow VFs to program VLAN filters */
1643 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1644 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1645 ~BE_PRIV_FILTMGMT, vf + 1);
1646 if (!status)
1647 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1648 }
1649 return 0;
1650}
1651
1652static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1653{
1654 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1655 struct device *dev = &adapter->pdev->dev;
1656 int status;
1657
1658 /* Reset Transparent VLAN Tagging. */
1659 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1660 vf_cfg->if_handle, 0, 0);
1661 if (status)
1662 return status;
1663
1664 /* Allow VFs to program VLAN filtering */
1665 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1666 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1667 BE_PRIV_FILTMGMT, vf + 1);
1668 if (!status) {
1669 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1670 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1671 }
1672 }
1673
1674 dev_info(dev,
1675 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1676 return 0;
1677}
1678
748b539a 1679static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1680{
1681 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1682 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1683 int status;
1da87b7f 1684
11ac75ed 1685 if (!sriov_enabled(adapter))
1686 return -EPERM;
1687
b9fc0e53 1688 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1689 return -EINVAL;
1690
1691 if (vlan || qos) {
1692 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1693 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1694 } else {
435452aa 1695 status = be_clear_vf_tvt(adapter, vf);
1696 }
1697
1698 if (status) {
1699 dev_err(&adapter->pdev->dev,
1700 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1701 status);
1702 return be_cmd_status(status);
1703 }
1704
1705 vf_cfg->vlan_tag = vlan;
abccf23e 1706 return 0;
1707}
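/* Worked example of the tag packing above (illustrative note, not
 * driver logic; assumes VLAN_PRIO_SHIFT == 13 per the 802.1Q TCI
 * layout): for vlan = 100 and qos = 5,
 *	vlan |= qos << VLAN_PRIO_SHIFT
 *	     => 100 | (5 << 13) = 0xa064
 * and this combined tag is what be_set_vf_tvt() hands to
 * be_cmd_set_hsw_config().
 */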
1708
1709static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1710 int min_tx_rate, int max_tx_rate)
1711{
1712 struct be_adapter *adapter = netdev_priv(netdev);
1713 struct device *dev = &adapter->pdev->dev;
1714 int percent_rate, status = 0;
1715 u16 link_speed = 0;
1716 u8 link_status;
e1d18735 1717
11ac75ed 1718 if (!sriov_enabled(adapter))
1719 return -EPERM;
1720
94f434c2 1721 if (vf >= adapter->num_vfs)
1722 return -EINVAL;
1723
1724 if (min_tx_rate)
1725 return -EINVAL;
1726
1727 if (!max_tx_rate)
1728 goto config_qos;
1729
1730 status = be_cmd_link_status_query(adapter, &link_speed,
1731 &link_status, 0);
1732 if (status)
1733 goto err;
1734
1735 if (!link_status) {
1736 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1737 status = -ENETDOWN;
1738 goto err;
1739 }
1740
1741 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1742 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1743 link_speed);
1744 status = -EINVAL;
1745 goto err;
1746 }
1747
1748 /* On Skyhawk the QOS setting must be done only as a % value */
1749 percent_rate = link_speed / 100;
1750 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1751 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1752 percent_rate);
1753 status = -EINVAL;
1754 goto err;
94f434c2 1755 }
e1d18735 1756
1757config_qos:
1758 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1759 if (status)
1760 goto err;
1761
1762 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1763 return 0;
1764
1765err:
1766 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1767 max_tx_rate, vf);
abccf23e 1768 return be_cmd_status(status);
e1d18735 1769}
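/* Worked example of the Skyhawk rate check above (illustrative):
 * on a 10Gbps link, link_speed = 10000 so percent_rate = 100;
 * max_tx_rate = 2500 (2500 % 100 == 0) is accepted and passed to
 * be_cmd_config_qos(), while 2550 is rejected because the rate
 * can only be programmed in steps of 1% of the link speed.
 */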
e2fb1afa 1770
1771static int be_set_vf_link_state(struct net_device *netdev, int vf,
1772 int link_state)
1773{
1774 struct be_adapter *adapter = netdev_priv(netdev);
1775 int status;
1776
1777 if (!sriov_enabled(adapter))
1778 return -EPERM;
1779
1780 if (vf >= adapter->num_vfs)
1781 return -EINVAL;
1782
1783 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1784 if (status) {
1785 dev_err(&adapter->pdev->dev,
1786 "Link state change on VF %d failed: %#x\n", vf, status);
1787 return be_cmd_status(status);
1788 }
bdce2ad7 1789
1790 adapter->vf_cfg[vf].plink_tracking = link_state;
1791
1792 return 0;
bdce2ad7 1793}
e1d18735 1794
1795static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1796{
1797 struct be_adapter *adapter = netdev_priv(netdev);
1798 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1799 u8 spoofchk;
1800 int status;
1801
1802 if (!sriov_enabled(adapter))
1803 return -EPERM;
1804
1805 if (vf >= adapter->num_vfs)
1806 return -EINVAL;
1807
1808 if (BEx_chip(adapter))
1809 return -EOPNOTSUPP;
1810
1811 if (enable == vf_cfg->spoofchk)
1812 return 0;
1813
1814 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1815
1816 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1817 0, spoofchk);
1818 if (status) {
1819 dev_err(&adapter->pdev->dev,
1820 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1821 return be_cmd_status(status);
1822 }
1823
1824 vf_cfg->spoofchk = enable;
1825 return 0;
1826}
1827
1828static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1829 ulong now)
6b7c5b94 1830{
1831 aic->rx_pkts_prev = rx_pkts;
1832 aic->tx_reqs_prev = tx_pkts;
1833 aic->jiffies = now;
1834}
ac124ff9 1835
20947770 1836static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1837{
1838 struct be_adapter *adapter = eqo->adapter;
1839 int eqd, start;
2632bafd 1840 struct be_aic_obj *aic;
1841 struct be_rx_obj *rxo;
1842 struct be_tx_obj *txo;
20947770 1843 u64 rx_pkts = 0, tx_pkts = 0;
1844 ulong now;
1845 u32 pps, delta;
20947770 1846 int i;
10ef9ab4 1847
1848 aic = &adapter->aic_obj[eqo->idx];
1849 if (!aic->enable) {
1850 if (aic->jiffies)
1851 aic->jiffies = 0;
1852 eqd = aic->et_eqd;
1853 return eqd;
1854 }
6b7c5b94 1855
20947770 1856 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1857 do {
57a7744e 1858 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1859 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1860 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1861 }
10ef9ab4 1862
20947770 1863 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1864 do {
57a7744e 1865 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1866 tx_pkts += txo->stats.tx_reqs;
57a7744e 1867 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1868 }
6b7c5b94 1869
 1870	/* Skip if wrapped around, or on the first calculation */
1871 now = jiffies;
1872 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1873 rx_pkts < aic->rx_pkts_prev ||
1874 tx_pkts < aic->tx_reqs_prev) {
1875 be_aic_update(aic, rx_pkts, tx_pkts, now);
1876 return aic->prev_eqd;
1877 }
2632bafd 1878
1879 delta = jiffies_to_msecs(now - aic->jiffies);
1880 if (delta == 0)
1881 return aic->prev_eqd;
10ef9ab4 1882
1883 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1884 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1885 eqd = (pps / 15000) << 2;
2632bafd 1886
1887 if (eqd < 8)
1888 eqd = 0;
1889 eqd = min_t(u32, eqd, aic->max_eqd);
1890 eqd = max_t(u32, eqd, aic->min_eqd);
1891
1892 be_aic_update(aic, rx_pkts, tx_pkts, now);
1893
1894 return eqd;
1895}
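/* Worked example of the heuristic above (illustrative numbers):
 * with 64000 rx pkts and 16000 tx reqs seen over a 1000ms delta,
 *	pps = 64000 + 16000 = 80000
 *	eqd = (80000 / 15000) << 2 = 5 << 2 = 20
 * which is then clamped to [aic->min_eqd, aic->max_eqd]. Rates
 * below ~30000 pps compute eqd < 8 and get rounded down to 0, so
 * light traffic keeps un-moderated interrupts.
 */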
1896
1897/* For Skyhawk-R only */
1898static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1899{
1900 struct be_adapter *adapter = eqo->adapter;
1901 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1902 ulong now = jiffies;
1903 int eqd;
1904 u32 mult_enc;
1905
1906 if (!aic->enable)
1907 return 0;
1908
1909 if (time_before_eq(now, aic->jiffies) ||
1910 jiffies_to_msecs(now - aic->jiffies) < 1)
1911 eqd = aic->prev_eqd;
1912 else
1913 eqd = be_get_new_eqd(eqo);
1914
1915 if (eqd > 100)
1916 mult_enc = R2I_DLY_ENC_1;
1917 else if (eqd > 60)
1918 mult_enc = R2I_DLY_ENC_2;
1919 else if (eqd > 20)
1920 mult_enc = R2I_DLY_ENC_3;
1921 else
1922 mult_enc = R2I_DLY_ENC_0;
1923
1924 aic->prev_eqd = eqd;
1925
1926 return mult_enc;
1927}
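/* Summary of the encoding above (editorial note): eqd > 100
 * selects R2I_DLY_ENC_1, 61..100 selects R2I_DLY_ENC_2, 21..60
 * selects R2I_DLY_ENC_3, and 0..20 selects R2I_DLY_ENC_0 (no
 * extra delay); the chosen value is passed to be_eq_notify()
 * when the EQ is re-armed on Skyhawk-R.
 */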
1928
1929void be_eqd_update(struct be_adapter *adapter, bool force_update)
1930{
1931 struct be_set_eqd set_eqd[MAX_EVT_QS];
1932 struct be_aic_obj *aic;
1933 struct be_eq_obj *eqo;
1934 int i, num = 0, eqd;
1935
1936 for_all_evt_queues(adapter, eqo, i) {
1937 aic = &adapter->aic_obj[eqo->idx];
1938 eqd = be_get_new_eqd(eqo);
1939 if (force_update || eqd != aic->prev_eqd) {
1940 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1941 set_eqd[num].eq_id = eqo->q.id;
1942 aic->prev_eqd = eqd;
1943 num++;
1944 }
ac124ff9 1945 }
1946
1947 if (num)
1948 be_cmd_modify_eqd(adapter, set_eqd, num);
1949}
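/* Note (editorial): the (eqd * 65)/100 scaling converts the value
 * computed by be_get_new_eqd() into the delay-multiplier units
 * taken by be_cmd_modify_eqd(); e.g. eqd = 20 becomes a
 * delay_multiplier of 13. The multiplier's exact meaning is
 * firmware-defined.
 */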
1950
3abcdeda 1951static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1952 struct be_rx_compl_info *rxcp)
4097f663 1953{
ac124ff9 1954 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1955
ab1594e9 1956 u64_stats_update_begin(&stats->sync);
3abcdeda 1957 stats->rx_compl++;
2e588f84 1958 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1959 stats->rx_pkts++;
2e588f84 1960 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1961 stats->rx_mcast_pkts++;
2e588f84 1962 if (rxcp->err)
ac124ff9 1963 stats->rx_compl_err++;
ab1594e9 1964 u64_stats_update_end(&stats->sync);
1965}
1966
2e588f84 1967static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1968{
19fad86f 1969 /* L4 checksum is not reliable for non TCP/UDP packets.
1970 * Also ignore ipcksm for ipv6 pkts
1971 */
2e588f84 1972 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1973 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1974}
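/* Examples of the rule above (illustrative): a clean TCP/IPv4
 * frame (tcpf, l4_csum and ip_csum set, err clear) passes; a
 * TCP/IPv6 frame passes on l4_csum alone, since IPv6 has no
 * header checksum (hence the rxcp->ipv6 escape); frames that are
 * neither TCP nor UDP never pass and fall back to software
 * checksumming.
 */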
1975
0b0ef1d0 1976static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1977{
10ef9ab4 1978 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1979 struct be_rx_page_info *rx_page_info;
3abcdeda 1980 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1981 u16 frag_idx = rxq->tail;
6b7c5b94 1982
3abcdeda 1983 rx_page_info = &rxo->page_info_tbl[frag_idx];
1984 BUG_ON(!rx_page_info->page);
1985
e50287be 1986 if (rx_page_info->last_frag) {
1987 dma_unmap_page(&adapter->pdev->dev,
1988 dma_unmap_addr(rx_page_info, bus),
1989 adapter->big_page_size, DMA_FROM_DEVICE);
1990 rx_page_info->last_frag = false;
1991 } else {
1992 dma_sync_single_for_cpu(&adapter->pdev->dev,
1993 dma_unmap_addr(rx_page_info, bus),
1994 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1995 }
6b7c5b94 1996
0b0ef1d0 1997 queue_tail_inc(rxq);
1998 atomic_dec(&rxq->used);
1999 return rx_page_info;
2000}
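/* Note (editorial sketch, assuming the default rx_frag_size of
 * 2048 and 4K pages): big_page_size is then one page holding two
 * frags. Only the frag flagged last_frag owns the DMA mapping and
 * unmaps the whole page above; earlier frags of the same page are
 * only dma_sync'd for CPU access, avoiding per-frag map/unmap.
 */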
2001
 2002/* Throw away the data in the Rx completion */
2003static void be_rx_compl_discard(struct be_rx_obj *rxo,
2004 struct be_rx_compl_info *rxcp)
6b7c5b94 2005{
6b7c5b94 2006 struct be_rx_page_info *page_info;
2e588f84 2007 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2008
e80d9da6 2009 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2010 page_info = get_rx_page_info(rxo);
2011 put_page(page_info->page);
2012 memset(page_info, 0, sizeof(*page_info));
2013 }
2014}
2015
2016/*
2017 * skb_fill_rx_data forms a complete skb for an ether frame
2018 * indicated by rxcp.
2019 */
2020static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2021 struct be_rx_compl_info *rxcp)
6b7c5b94 2022{
6b7c5b94 2023 struct be_rx_page_info *page_info;
2024 u16 i, j;
2025 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2026 u8 *start;
6b7c5b94 2027
0b0ef1d0 2028 page_info = get_rx_page_info(rxo);
2029 start = page_address(page_info->page) + page_info->page_offset;
2030 prefetch(start);
2031
2032 /* Copy data in the first descriptor of this completion */
2e588f84 2033 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2034
2035 skb->len = curr_frag_len;
2036 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2037 memcpy(skb->data, start, curr_frag_len);
2038 /* Complete packet has now been moved to data */
2039 put_page(page_info->page);
2040 skb->data_len = 0;
2041 skb->tail += curr_frag_len;
2042 } else {
2043 hdr_len = ETH_HLEN;
2044 memcpy(skb->data, start, hdr_len);
6b7c5b94 2045 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2046 skb_frag_set_page(skb, 0, page_info->page);
2047 skb_shinfo(skb)->frags[0].page_offset =
2048 page_info->page_offset + hdr_len;
2049 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2050 curr_frag_len - hdr_len);
6b7c5b94 2051 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2052 skb->truesize += rx_frag_size;
2053 skb->tail += hdr_len;
2054 }
205859a2 2055 page_info->page = NULL;
6b7c5b94 2056
2057 if (rxcp->pkt_size <= rx_frag_size) {
2058 BUG_ON(rxcp->num_rcvd != 1);
2059 return;
2060 }
2061
2062 /* More frags present for this completion */
2063 remaining = rxcp->pkt_size - curr_frag_len;
2064 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2065 page_info = get_rx_page_info(rxo);
2e588f84 2066 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2067
2068 /* Coalesce all frags from the same physical page in one slot */
2069 if (page_info->page_offset == 0) {
2070 /* Fresh page */
2071 j++;
b061b39e 2072 skb_frag_set_page(skb, j, page_info->page);
2073 skb_shinfo(skb)->frags[j].page_offset =
2074 page_info->page_offset;
9e903e08 2075 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2076 skb_shinfo(skb)->nr_frags++;
2077 } else {
2078 put_page(page_info->page);
2079 }
2080
9e903e08 2081 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2082 skb->len += curr_frag_len;
2083 skb->data_len += curr_frag_len;
bdb28a97 2084 skb->truesize += rx_frag_size;
2e588f84 2085 remaining -= curr_frag_len;
205859a2 2086 page_info->page = NULL;
6b7c5b94 2087 }
bd46cb6c 2088 BUG_ON(j > MAX_SKB_FRAGS);
2089}
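/* Layout produced above, sketched for a 3000-byte frame with
 * rx_frag_size = 2048 (illustrative): the first ETH_HLEN bytes
 * are copied into skb->data; the rest of frag 0 (2034 bytes) and
 * frag 1 (952 bytes) are attached as page frags, coalesced into
 * one frag slot per physical page where the offsets line up.
 */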
2090
5be93b9a 2091/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2092static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2093 struct be_rx_compl_info *rxcp)
6b7c5b94 2094{
10ef9ab4 2095 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2096 struct net_device *netdev = adapter->netdev;
6b7c5b94 2097 struct sk_buff *skb;
89420424 2098
bb349bb4 2099 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2100 if (unlikely(!skb)) {
ac124ff9 2101 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2102 be_rx_compl_discard(rxo, rxcp);
2103 return;
2104 }
2105
10ef9ab4 2106 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2107
6332c8d3 2108 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2109 skb->ip_summed = CHECKSUM_UNNECESSARY;
2110 else
2111 skb_checksum_none_assert(skb);
6b7c5b94 2112
6332c8d3 2113 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2114 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2115 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2116 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2117
b6c0e89d 2118 skb->csum_level = rxcp->tunneled;
6384a4d0 2119 skb_mark_napi_id(skb, napi);
6b7c5b94 2120
343e43c0 2121 if (rxcp->vlanf)
86a9bad3 2122 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2123
2124 netif_receive_skb(skb);
2125}
2126
5be93b9a 2127/* Process the RX completion indicated by rxcp when GRO is enabled */
2128static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2129 struct napi_struct *napi,
2130 struct be_rx_compl_info *rxcp)
6b7c5b94 2131{
10ef9ab4 2132 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2133 struct be_rx_page_info *page_info;
5be93b9a 2134 struct sk_buff *skb = NULL;
2135 u16 remaining, curr_frag_len;
2136 u16 i, j;
3968fa1e 2137
10ef9ab4 2138 skb = napi_get_frags(napi);
5be93b9a 2139 if (!skb) {
10ef9ab4 2140 be_rx_compl_discard(rxo, rxcp);
2141 return;
2142 }
2143
2144 remaining = rxcp->pkt_size;
2145 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2146 page_info = get_rx_page_info(rxo);
2147
2148 curr_frag_len = min(remaining, rx_frag_size);
2149
2150 /* Coalesce all frags from the same physical page in one slot */
2151 if (i == 0 || page_info->page_offset == 0) {
2152 /* First frag or Fresh page */
2153 j++;
b061b39e 2154 skb_frag_set_page(skb, j, page_info->page);
2155 skb_shinfo(skb)->frags[j].page_offset =
2156 page_info->page_offset;
9e903e08 2157 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2158 } else {
2159 put_page(page_info->page);
2160 }
9e903e08 2161 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2162 skb->truesize += rx_frag_size;
bd46cb6c 2163 remaining -= curr_frag_len;
2164 memset(page_info, 0, sizeof(*page_info));
2165 }
bd46cb6c 2166 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2167
5be93b9a 2168 skb_shinfo(skb)->nr_frags = j + 1;
2169 skb->len = rxcp->pkt_size;
2170 skb->data_len = rxcp->pkt_size;
5be93b9a 2171 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2172 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2173 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2174 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2175
b6c0e89d 2176 skb->csum_level = rxcp->tunneled;
6384a4d0 2177 skb_mark_napi_id(skb, napi);
5be93b9a 2178
343e43c0 2179 if (rxcp->vlanf)
86a9bad3 2180 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2181
10ef9ab4 2182 napi_gro_frags(napi);
2183}
2184
2185static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2186 struct be_rx_compl_info *rxcp)
2e588f84 2187{
2188 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2189 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2190 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2191 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2192 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2193 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2194 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2195 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2196 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2197 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2198 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2199 if (rxcp->vlanf) {
2200 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2201 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2202 }
c3c18bc1 2203 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2204 rxcp->tunneled =
c3c18bc1 2205 GET_RX_COMPL_V1_BITS(tunneled, compl);
2206}
2207
2208static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2209 struct be_rx_compl_info *rxcp)
2e588f84 2210{
2211 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2212 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2213 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2214 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2215 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2216 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2217 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2218 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2219 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2220 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2221 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2222 if (rxcp->vlanf) {
2223 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2224 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2225 }
2226 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2227 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2228}
2229
2230static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2231{
2232 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2233 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2234 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2235
 2236	/* For checking the valid bit it is OK to use either definition as the
2237 * valid bit is at the same position in both v0 and v1 Rx compl */
2238 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2239 return NULL;
6b7c5b94 2240
2241 rmb();
2242 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2243
2e588f84 2244 if (adapter->be3_native)
10ef9ab4 2245 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2246 else
10ef9ab4 2247 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2248
2249 if (rxcp->ip_frag)
2250 rxcp->l4_csum = 0;
2251
15d72184 2252 if (rxcp->vlanf) {
2253 /* In QNQ modes, if qnq bit is not set, then the packet was
2254 * tagged only with the transparent outer vlan-tag and must
2255 * not be treated as a vlan packet by host
2256 */
2257 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2258 rxcp->vlanf = 0;
6b7c5b94 2259
15d72184 2260 if (!lancer_chip(adapter))
3c709f8f 2261 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2262
939cf306 2263 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2264 !test_bit(rxcp->vlan_tag, adapter->vids))
2265 rxcp->vlanf = 0;
2266 }
2267
 2268	/* As the compl has been parsed, reset it; we won't touch it again */
2269 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2270
3abcdeda 2271 queue_tail_inc(&rxo->cq);
2272 return rxcp;
2273}
2274
1829b086 2275static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2276{
6b7c5b94 2277 u32 order = get_order(size);
1829b086 2278
6b7c5b94 2279 if (order > 0)
2280 gfp |= __GFP_COMP;
2281 return alloc_pages(gfp, order);
2282}
2283
2284/*
2285 * Allocate a page, split it to fragments of size rx_frag_size and post as
2286 * receive buffers to BE
2287 */
c30d7266 2288static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2289{
3abcdeda 2290 struct be_adapter *adapter = rxo->adapter;
26d92f92 2291 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2292 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2293 struct page *pagep = NULL;
ba42fad0 2294 struct device *dev = &adapter->pdev->dev;
2295 struct be_eth_rx_d *rxd;
2296 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2297 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2298
3abcdeda 2299 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2300 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2301 if (!pagep) {
1829b086 2302 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2303 if (unlikely(!pagep)) {
ac124ff9 2304 rx_stats(rxo)->rx_post_fail++;
2305 break;
2306 }
2307 page_dmaaddr = dma_map_page(dev, pagep, 0,
2308 adapter->big_page_size,
2b7bcebf 2309 DMA_FROM_DEVICE);
2310 if (dma_mapping_error(dev, page_dmaaddr)) {
2311 put_page(pagep);
2312 pagep = NULL;
d3de1540 2313 adapter->drv_stats.dma_map_errors++;
2314 break;
2315 }
e50287be 2316 page_offset = 0;
2317 } else {
2318 get_page(pagep);
e50287be 2319 page_offset += rx_frag_size;
6b7c5b94 2320 }
e50287be 2321 page_info->page_offset = page_offset;
6b7c5b94 2322 page_info->page = pagep;
2323
2324 rxd = queue_head_node(rxq);
e50287be 2325 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2326 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2327 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2328
2329 /* Any space left in the current big page for another frag? */
2330 if ((page_offset + rx_frag_size + rx_frag_size) >
2331 adapter->big_page_size) {
2332 pagep = NULL;
2333 page_info->last_frag = true;
2334 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2335 } else {
2336 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2337 }
2338
2339 prev_page_info = page_info;
2340 queue_head_inc(rxq);
10ef9ab4 2341 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2342 }
2343
2344 /* Mark the last frag of a page when we break out of the above loop
2345 * with no more slots available in the RXQ
2346 */
2347 if (pagep) {
2348 prev_page_info->last_frag = true;
2349 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2350 }
2351
2352 if (posted) {
6b7c5b94 2353 atomic_add(posted, &rxq->used);
2354 if (rxo->rx_post_starved)
2355 rxo->rx_post_starved = false;
c30d7266 2356 do {
69304cc9 2357 notify = min(MAX_NUM_POST_ERX_DB, posted);
2358 be_rxq_notify(adapter, rxq->id, notify);
2359 posted -= notify;
2360 } while (posted);
2361 } else if (atomic_read(&rxq->used) == 0) {
2362 /* Let be_worker replenish when memory is available */
3abcdeda 2363 rxo->rx_post_starved = true;
6b7c5b94 2364 }
2365}
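/* Posting pattern above, sketched (illustrative, assuming 4K
 * pages and rx_frag_size = 2048): the first frag dma-maps a fresh
 * page, the second reuses it via get_page() at offset 2048, and
 * the frag that exhausts the page is flagged last_frag so that
 * get_rx_page_info() knows when to unmap. Doorbells are rung in
 * chunks of at most MAX_NUM_POST_ERX_DB frags per notify.
 */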
2366
152ffe5b 2367static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2368{
2369 struct be_queue_info *tx_cq = &txo->cq;
2370 struct be_tx_compl_info *txcp = &txo->txcp;
2371 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2372
152ffe5b 2373 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2374 return NULL;
2375
152ffe5b 2376 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2377 rmb();
152ffe5b 2378 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2379
2380 txcp->status = GET_TX_COMPL_BITS(status, compl);
2381 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2382
152ffe5b 2383 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2384 queue_tail_inc(tx_cq);
2385 return txcp;
2386}
2387
3c8def97 2388static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2389 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2390{
5f07b3c5 2391 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2392 struct be_queue_info *txq = &txo->q;
2393 u16 frag_index, num_wrbs = 0;
2394 struct sk_buff *skb = NULL;
2395 bool unmap_skb_hdr = false;
a73b796e 2396 struct be_eth_wrb *wrb;
6b7c5b94 2397
ec43b1a6 2398 do {
2399 if (sent_skbs[txq->tail]) {
2400 /* Free skb from prev req */
2401 if (skb)
2402 dev_consume_skb_any(skb);
2403 skb = sent_skbs[txq->tail];
2404 sent_skbs[txq->tail] = NULL;
2405 queue_tail_inc(txq); /* skip hdr wrb */
2406 num_wrbs++;
2407 unmap_skb_hdr = true;
2408 }
a73b796e 2409 wrb = queue_tail_node(txq);
5f07b3c5 2410 frag_index = txq->tail;
2b7bcebf 2411 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2412 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2413 unmap_skb_hdr = false;
6b7c5b94 2414 queue_tail_inc(txq);
2415 num_wrbs++;
2416 } while (frag_index != last_index);
2417 dev_consume_skb_any(skb);
6b7c5b94 2418
4d586b82 2419 return num_wrbs;
2420}
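/* WRB accounting, sketched (editorial note): each TX request
 * takes one header wrb plus one data wrb per DMA fragment, so an
 * skb with linear data and two page frags consumes four wrbs. A
 * completion carries only the index of its last wrb; the walk
 * above frees everything from the queue tail up to that index and
 * returns the count so the caller can credit txq->used.
 */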
2421
2422/* Return the number of events in the event queue */
2423static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2424{
2425 struct be_eq_entry *eqe;
2426 int num = 0;
859b1e4e 2427
2428 do {
2429 eqe = queue_tail_node(&eqo->q);
2430 if (eqe->evt == 0)
2431 break;
859b1e4e 2432
2433 rmb();
2434 eqe->evt = 0;
2435 num++;
2436 queue_tail_inc(&eqo->q);
2437 } while (true);
2438
2439 return num;
2440}
2441
 2442/* Leaves the EQ in disarmed state */
2443static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2444{
10ef9ab4 2445 int num = events_get(eqo);
859b1e4e 2446
20947770 2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448}
2449
10ef9ab4 2450static void be_rx_cq_clean(struct be_rx_obj *rxo)
2451{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q;
2454 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2455 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter;
2457 int flush_wait = 0;
6b7c5b94 2458
2459 /* Consume pending rx completions.
2460 * Wait for the flush completion (identified by zero num_rcvd)
2461 * to arrive. Notify CQ even when there are no more CQ entries
2462 * for HW to flush partially coalesced CQ entries.
2463 * In Lancer, there is no need to wait for flush compl.
2464 */
2465 for (;;) {
2466 rxcp = be_rx_compl_get(rxo);
ddf1169f 2467 if (!rxcp) {
d23e946c
SP
2468 if (lancer_chip(adapter))
2469 break;
2470
2471 if (flush_wait++ > 50 ||
2472 be_check_error(adapter,
2473 BE_ERROR_HW)) {
2474 dev_warn(&adapter->pdev->dev,
2475 "did not receive flush compl\n");
2476 break;
2477 }
2478 be_cq_notify(adapter, rx_cq->id, true, 0);
2479 mdelay(1);
2480 } else {
2481 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2482 be_cq_notify(adapter, rx_cq->id, false, 1);
2483 if (rxcp->num_rcvd == 0)
2484 break;
2485 }
2486 }
2487
2488 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500}
2501
0ae57bb3 2502static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2503{
2504 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2505 struct device *dev = &adapter->pdev->dev;
152ffe5b 2506 struct be_tx_compl_info *txcp;
0ae57bb3 2507 struct be_queue_info *txq;
152ffe5b 2508 struct be_tx_obj *txo;
0ae57bb3 2509 int i, pending_txqs;
a8e9179a 2510
1a3d0717 2511 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2512 do {
2513 pending_txqs = adapter->num_tx_qs;
2514
2515 for_all_tx_queues(adapter, txo, i) {
2516 cmpl = 0;
2517 num_wrbs = 0;
0ae57bb3 2518 txq = &txo->q;
2519 while ((txcp = be_tx_compl_get(txo))) {
2520 num_wrbs +=
2521 be_tx_compl_process(adapter, txo,
2522 txcp->end_index);
2523 cmpl++;
2524 }
2525 if (cmpl) {
2526 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2527 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2528 timeo = 0;
0ae57bb3 2529 }
cf5671e6 2530 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2531 pending_txqs--;
2532 }
2533
2534 if (pending_txqs == 0 || ++timeo > 10 ||
2535 be_check_error(adapter, BE_ERROR_HW))
2536 break;
2537
2538 mdelay(1);
2539 } while (true);
2540
5f07b3c5 2541 /* Free enqueued TX that was never notified to HW */
2542 for_all_tx_queues(adapter, txo, i) {
2543 txq = &txo->q;
0ae57bb3 2544
2545 if (atomic_read(&txq->used)) {
2546 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2547 i, atomic_read(&txq->used));
2548 notified_idx = txq->tail;
0ae57bb3 2549 end_idx = txq->tail;
2550 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2551 txq->len);
2552 /* Use the tx-compl process logic to handle requests
2553 * that were not sent to the HW.
2554 */
2555 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2556 atomic_sub(num_wrbs, &txq->used);
2557 BUG_ON(atomic_read(&txq->used));
2558 txo->pend_wrb_cnt = 0;
2559 /* Since hw was never notified of these requests,
2560 * reset TXQ indices
2561 */
2562 txq->head = notified_idx;
2563 txq->tail = notified_idx;
0ae57bb3 2564 }
b03388d6 2565 }
2566}
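/* Cleanup strategy above, summarized (editorial note):
 * completions are polled until the HW has been silent for ~10ms
 * (timeo counts 1ms sleeps). Whatever then remains in a txq was
 * never notified to the HW, so it is reclaimed with the same
 * completion logic and head/tail are rewound to notified_idx to
 * keep the queue indices consistent with what the HW last saw.
 */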
2567
2568static void be_evt_queues_destroy(struct be_adapter *adapter)
2569{
2570 struct be_eq_obj *eqo;
2571 int i;
2572
2573 for_all_evt_queues(adapter, eqo, i) {
2574 if (eqo->q.created) {
2575 be_eq_clean(eqo);
10ef9ab4 2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2577 napi_hash_del(&eqo->napi);
68d7bdcb 2578 netif_napi_del(&eqo->napi);
19d59aa7 2579 }
d658d98a 2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q);
2582 }
2583}
2584
2585static int be_evt_queues_create(struct be_adapter *adapter)
2586{
2587 struct be_queue_info *eq;
2588 struct be_eq_obj *eqo;
2632bafd 2589 struct be_aic_obj *aic;
2590 int i, rc;
2591
2592 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2593 adapter->cfg_num_qs);
2594
2595 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2596 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
6384a4d0 2603 napi_hash_add(&eqo->napi);
2632bafd 2604 aic = &adapter->aic_obj[i];
10ef9ab4 2605 eqo->adapter = adapter;
10ef9ab4 2606 eqo->idx = i;
2607 aic->max_eqd = BE_MAX_EQD;
2608 aic->enable = true;
2609
2610 eq = &eqo->q;
2611 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2612 sizeof(struct be_eq_entry));
2613 if (rc)
2614 return rc;
2615
f2f781a7 2616 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc)
2618 return rc;
2619 }
1cfafab9 2620 return 0;
2621}
2622
2623static void be_mcc_queues_destroy(struct be_adapter *adapter)
2624{
2625 struct be_queue_info *q;
5fb379ee 2626
8788fdc2 2627 q = &adapter->mcc_obj.q;
5fb379ee 2628 if (q->created)
8788fdc2 2629 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2630 be_queue_free(adapter, q);
2631
8788fdc2 2632 q = &adapter->mcc_obj.cq;
5fb379ee 2633 if (q->created)
8788fdc2 2634 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2635 be_queue_free(adapter, q);
2636}
2637
2638/* Must be called only after TX qs are created as MCC shares TX EQ */
2639static int be_mcc_queues_create(struct be_adapter *adapter)
2640{
2641 struct be_queue_info *q, *cq;
5fb379ee 2642
8788fdc2 2643 cq = &adapter->mcc_obj.cq;
5fb379ee 2644 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2645 sizeof(struct be_mcc_compl)))
2646 goto err;
2647
2648 /* Use the default EQ for MCC completions */
2649 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2650 goto mcc_cq_free;
2651
8788fdc2 2652 q = &adapter->mcc_obj.q;
2653 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2654 goto mcc_cq_destroy;
2655
8788fdc2 2656 if (be_cmd_mccq_create(adapter, q, cq))
2657 goto mcc_q_free;
2658
2659 return 0;
2660
2661mcc_q_free:
2662 be_queue_free(adapter, q);
2663mcc_cq_destroy:
8788fdc2 2664 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2665mcc_cq_free:
2666 be_queue_free(adapter, cq);
2667err:
2668 return -1;
2669}
2670
2671static void be_tx_queues_destroy(struct be_adapter *adapter)
2672{
2673 struct be_queue_info *q;
2674 struct be_tx_obj *txo;
2675 u8 i;
6b7c5b94 2676
2677 for_all_tx_queues(adapter, txo, i) {
2678 q = &txo->q;
2679 if (q->created)
2680 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2681 be_queue_free(adapter, q);
6b7c5b94 2682
2683 q = &txo->cq;
2684 if (q->created)
2685 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2686 be_queue_free(adapter, q);
2687 }
2688}
2689
7707133c 2690static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2691{
73f394e6 2692 struct be_queue_info *cq;
3c8def97 2693 struct be_tx_obj *txo;
73f394e6 2694 struct be_eq_obj *eqo;
92bf14ab 2695 int status, i;
6b7c5b94 2696
92bf14ab 2697 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2698
2699 for_all_tx_queues(adapter, txo, i) {
2700 cq = &txo->cq;
2701 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2702 sizeof(struct be_eth_tx_compl));
2703 if (status)
2704 return status;
3c8def97 2705
2706 u64_stats_init(&txo->stats.sync);
2707 u64_stats_init(&txo->stats.sync_compl);
2708
2709 /* If num_evt_qs is less than num_tx_qs, then more than
 2710	 * one txq shares an eq
2711 */
2712 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2713 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2714 if (status)
2715 return status;
6b7c5b94 2716
2717 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2718 sizeof(struct be_eth_wrb));
2719 if (status)
2720 return status;
6b7c5b94 2721
94d73aaa 2722 status = be_cmd_txq_create(adapter, txo);
2723 if (status)
2724 return status;
2725
2726 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2727 eqo->idx);
3c8def97 2728 }
6b7c5b94 2729
2730 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2731 adapter->num_tx_qs);
10ef9ab4 2732 return 0;
2733}
2734
10ef9ab4 2735static void be_rx_cqs_destroy(struct be_adapter *adapter)
2736{
2737 struct be_queue_info *q;
2738 struct be_rx_obj *rxo;
2739 int i;
2740
2741 for_all_rx_queues(adapter, rxo, i) {
2742 q = &rxo->cq;
2743 if (q->created)
2744 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2745 be_queue_free(adapter, q);
2746 }
2747}
2748
10ef9ab4 2749static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2750{
10ef9ab4 2751 struct be_queue_info *eq, *cq;
2752 struct be_rx_obj *rxo;
2753 int rc, i;
6b7c5b94 2754
92bf14ab 2755 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2756 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2757
 2758	/* We'll use RSS only if at least 2 RSS rings are supported. */
2759 if (adapter->num_rss_qs <= 1)
2760 adapter->num_rss_qs = 0;
2761
2762 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2763
2764 /* When the interface is not capable of RSS rings (and there is no
2765 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2766 */
2767 if (adapter->num_rx_qs == 0)
2768 adapter->num_rx_qs = 1;
92bf14ab 2769
6b7c5b94 2770 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2771 for_all_rx_queues(adapter, rxo, i) {
2772 rxo->adapter = adapter;
2773 cq = &rxo->cq;
2774 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2775 sizeof(struct be_eth_rx_compl));
3abcdeda 2776 if (rc)
10ef9ab4 2777 return rc;
3abcdeda 2778
827da44c 2779 u64_stats_init(&rxo->stats.sync);
2780 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2781 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2782 if (rc)
10ef9ab4 2783 return rc;
3abcdeda 2784 }
6b7c5b94 2785
d379142b 2786 dev_info(&adapter->pdev->dev,
71bb8bd0 2787 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2788 return 0;
2789}
2790
2791static irqreturn_t be_intx(int irq, void *dev)
2792{
2793 struct be_eq_obj *eqo = dev;
2794 struct be_adapter *adapter = eqo->adapter;
2795 int num_evts = 0;
6b7c5b94 2796
2797 /* IRQ is not expected when NAPI is scheduled as the EQ
2798 * will not be armed.
2799 * But, this can happen on Lancer INTx where it takes
 2800	 * a while to de-assert INTx or in BE2 where occasionally
2801 * an interrupt may be raised even when EQ is unarmed.
2802 * If NAPI is already scheduled, then counting & notifying
2803 * events will orphan them.
e49cc34f 2804 */
d0b9cec3 2805 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2806 num_evts = events_get(eqo);
2807 __napi_schedule(&eqo->napi);
2808 if (num_evts)
2809 eqo->spurious_intr = 0;
2810 }
20947770 2811 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2812
 2813	/* Return IRQ_HANDLED only for the first spurious intr
2814 * after a valid intr to stop the kernel from branding
2815 * this irq as a bad one!
e49cc34f 2816 */
2817 if (num_evts || eqo->spurious_intr++ == 0)
2818 return IRQ_HANDLED;
2819 else
2820 return IRQ_NONE;
2821}
2822
10ef9ab4 2823static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2824{
10ef9ab4 2825 struct be_eq_obj *eqo = dev;
6b7c5b94 2826
20947770 2827 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2828 napi_schedule(&eqo->napi);
2829 return IRQ_HANDLED;
2830}
2831
2e588f84 2832static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2833{
e38b1706 2834 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2835}
2836
10ef9ab4 2837static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2838 int budget, int polling)
6b7c5b94 2839{
2840 struct be_adapter *adapter = rxo->adapter;
2841 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2842 struct be_rx_compl_info *rxcp;
6b7c5b94 2843 u32 work_done;
c30d7266 2844 u32 frags_consumed = 0;
2845
2846 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2847 rxcp = be_rx_compl_get(rxo);
2848 if (!rxcp)
2849 break;
2850
2851 /* Is it a flush compl that has no data */
2852 if (unlikely(rxcp->num_rcvd == 0))
2853 goto loop_continue;
2854
 2855		/* Discard compl with partial DMA on Lancer B0 */
2856 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2857 be_rx_compl_discard(rxo, rxcp);
2858 goto loop_continue;
2859 }
2860
2861 /* On BE drop pkts that arrive due to imperfect filtering in
 2862		 * promiscuous mode on some SKUs
2863 */
2864 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2865 !lancer_chip(adapter))) {
10ef9ab4 2866 be_rx_compl_discard(rxo, rxcp);
12004ae9 2867 goto loop_continue;
64642811 2868 }
009dd872 2869
2870 /* Don't do gro when we're busy_polling */
2871 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2872 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2873 else
2874 be_rx_compl_process(rxo, napi, rxcp);
2875
12004ae9 2876loop_continue:
c30d7266 2877 frags_consumed += rxcp->num_rcvd;
2e588f84 2878 be_rx_stats_update(rxo, rxcp);
2879 }
2880
2881 if (work_done) {
2882 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2883
2884 /* When an rx-obj gets into post_starved state, just
2885 * let be_worker do the posting.
2886 */
2887 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2888 !rxo->rx_post_starved)
2889 be_post_rx_frags(rxo, GFP_ATOMIC,
2890 max_t(u32, MAX_RX_POST,
2891 frags_consumed));
6b7c5b94 2892 }
10ef9ab4 2893
2894 return work_done;
2895}
2896
152ffe5b 2897static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2898{
2899 switch (status) {
2900 case BE_TX_COMP_HDR_PARSE_ERR:
2901 tx_stats(txo)->tx_hdr_parse_err++;
2902 break;
2903 case BE_TX_COMP_NDMA_ERR:
2904 tx_stats(txo)->tx_dma_err++;
2905 break;
2906 case BE_TX_COMP_ACL_ERR:
2907 tx_stats(txo)->tx_spoof_check_err++;
2908 break;
2909 }
2910}
2911
152ffe5b 2912static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2913{
2914 switch (status) {
2915 case LANCER_TX_COMP_LSO_ERR:
2916 tx_stats(txo)->tx_tso_err++;
2917 break;
2918 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2919 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2920 tx_stats(txo)->tx_spoof_check_err++;
2921 break;
2922 case LANCER_TX_COMP_QINQ_ERR:
2923 tx_stats(txo)->tx_qinq_err++;
2924 break;
2925 case LANCER_TX_COMP_PARITY_ERR:
2926 tx_stats(txo)->tx_internal_parity_err++;
2927 break;
2928 case LANCER_TX_COMP_DMA_ERR:
2929 tx_stats(txo)->tx_dma_err++;
2930 break;
2931 }
2932}
2933
2934static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2935 int idx)
6b7c5b94 2936{
c8f64615 2937 int num_wrbs = 0, work_done = 0;
152ffe5b 2938 struct be_tx_compl_info *txcp;
c8f64615 2939
2940 while ((txcp = be_tx_compl_get(txo))) {
2941 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2942 work_done++;
3c8def97 2943
152ffe5b 2944 if (txcp->status) {
512bb8a2 2945 if (lancer_chip(adapter))
152ffe5b 2946 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2947 else
152ffe5b 2948 be_update_tx_err(txo, txcp->status);
512bb8a2 2949 }
10ef9ab4 2950 }
6b7c5b94 2951
2952 if (work_done) {
2953 be_cq_notify(adapter, txo->cq.id, true, work_done);
2954 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2955
2956 /* As Tx wrbs have been freed up, wake up netdev queue
2957 * if it was stopped due to lack of tx wrbs. */
2958 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2959 be_can_txq_wake(txo)) {
10ef9ab4 2960 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2961 }
2962
2963 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2964 tx_stats(txo)->tx_compl += work_done;
2965 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2966 }
10ef9ab4 2967}
6b7c5b94 2968
2969#ifdef CONFIG_NET_RX_BUSY_POLL
2970static inline bool be_lock_napi(struct be_eq_obj *eqo)
2971{
2972 bool status = true;
2973
2974 spin_lock(&eqo->lock); /* BH is already disabled */
2975 if (eqo->state & BE_EQ_LOCKED) {
2976 WARN_ON(eqo->state & BE_EQ_NAPI);
2977 eqo->state |= BE_EQ_NAPI_YIELD;
2978 status = false;
2979 } else {
2980 eqo->state = BE_EQ_NAPI;
2981 }
2982 spin_unlock(&eqo->lock);
2983 return status;
2984}
2985
2986static inline void be_unlock_napi(struct be_eq_obj *eqo)
2987{
2988 spin_lock(&eqo->lock); /* BH is already disabled */
2989
2990 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2991 eqo->state = BE_EQ_IDLE;
2992
2993 spin_unlock(&eqo->lock);
2994}
2995
2996static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2997{
2998 bool status = true;
2999
3000 spin_lock_bh(&eqo->lock);
3001 if (eqo->state & BE_EQ_LOCKED) {
3002 eqo->state |= BE_EQ_POLL_YIELD;
3003 status = false;
3004 } else {
3005 eqo->state |= BE_EQ_POLL;
3006 }
3007 spin_unlock_bh(&eqo->lock);
3008 return status;
3009}
3010
3011static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3012{
3013 spin_lock_bh(&eqo->lock);
3014
3015 WARN_ON(eqo->state & (BE_EQ_NAPI));
3016 eqo->state = BE_EQ_IDLE;
3017
3018 spin_unlock_bh(&eqo->lock);
3019}
3020
3021static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3022{
3023 spin_lock_init(&eqo->lock);
3024 eqo->state = BE_EQ_IDLE;
3025}
3026
3027static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3028{
3029 local_bh_disable();
3030
3031 /* It's enough to just acquire napi lock on the eqo to stop
3032 * be_busy_poll() from processing any queueus.
3033 */
3034 while (!be_lock_napi(eqo))
3035 mdelay(1);
3036
3037 local_bh_enable();
3038}
3039
3040#else /* CONFIG_NET_RX_BUSY_POLL */
3041
3042static inline bool be_lock_napi(struct be_eq_obj *eqo)
3043{
3044 return true;
3045}
3046
3047static inline void be_unlock_napi(struct be_eq_obj *eqo)
3048{
3049}
3050
3051static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3052{
3053 return false;
3054}
3055
3056static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3057{
3058}
3059
3060static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3061{
3062}
3063
3064static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3065{
3066}
3067#endif /* CONFIG_NET_RX_BUSY_POLL */
3068
68d7bdcb 3069int be_poll(struct napi_struct *napi, int budget)
3070{
3071 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3072 struct be_adapter *adapter = eqo->adapter;
0b545a62 3073 int max_work = 0, work, i, num_evts;
6384a4d0 3074 struct be_rx_obj *rxo;
a4906ea0 3075 struct be_tx_obj *txo;
20947770 3076 u32 mult_enc = 0;
f31e50a8 3077
3078 num_evts = events_get(eqo);
3079
3080 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3081 be_process_tx(adapter, txo, i);
f31e50a8 3082
3083 if (be_lock_napi(eqo)) {
3084 /* This loop will iterate twice for EQ0 in which
3085 * completions of the last RXQ (default one) are also processed
3086 * For other EQs the loop iterates only once
3087 */
3088 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3089 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3090 max_work = max(work, max_work);
3091 }
3092 be_unlock_napi(eqo);
3093 } else {
3094 max_work = budget;
10ef9ab4 3095 }
6b7c5b94 3096
3097 if (is_mcc_eqo(eqo))
3098 be_process_mcc(adapter);
93c86700 3099
3100 if (max_work < budget) {
3101 napi_complete(napi);
3102
3103 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3104 * delay via a delay multiplier encoding value
3105 */
3106 if (skyhawk_chip(adapter))
3107 mult_enc = be_get_eq_delay_mult_enc(eqo);
3108
3109 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3110 mult_enc);
3111 } else {
3112 /* As we'll continue in polling mode, count and clear events */
20947770 3113 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3114 }
10ef9ab4 3115 return max_work;
3116}
3117
3118#ifdef CONFIG_NET_RX_BUSY_POLL
3119static int be_busy_poll(struct napi_struct *napi)
3120{
3121 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3122 struct be_adapter *adapter = eqo->adapter;
3123 struct be_rx_obj *rxo;
3124 int i, work = 0;
3125
3126 if (!be_lock_busy_poll(eqo))
3127 return LL_FLUSH_BUSY;
3128
3129 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3130 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3131 if (work)
3132 break;
3133 }
3134
3135 be_unlock_busy_poll(eqo);
3136 return work;
3137}
3138#endif
3139
f67ef7ba 3140void be_detect_error(struct be_adapter *adapter)
7c185276 3141{
3142 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3143 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3144 u32 i;
eb0eecc1 3145 struct device *dev = &adapter->pdev->dev;
7c185276 3146
954f6825 3147 if (be_check_error(adapter, BE_ERROR_HW))
3148 return;
3149
3150 if (lancer_chip(adapter)) {
3151 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3152 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3153 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3154 sliport_err1 = ioread32(adapter->db +
748b539a 3155 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3156 sliport_err2 = ioread32(adapter->db +
748b539a 3157 SLIPORT_ERROR2_OFFSET);
 3158			/* Do not log error messages if it's a FW reset */
3159 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3160 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3161 dev_info(dev, "Firmware update in progress\n");
3162 } else {
3163 dev_err(dev, "Error detected in the card\n");
3164 dev_err(dev, "ERR: sliport status 0x%x\n",
3165 sliport_status);
3166 dev_err(dev, "ERR: sliport error1 0x%x\n",
3167 sliport_err1);
3168 dev_err(dev, "ERR: sliport error2 0x%x\n",
3169 sliport_err2);
3170 }
3171 }
3172 } else {
3173 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3174 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3175 ue_lo_mask = ioread32(adapter->pcicfg +
3176 PCICFG_UE_STATUS_LOW_MASK);
3177 ue_hi_mask = ioread32(adapter->pcicfg +
3178 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3179
3180 ue_lo = (ue_lo & ~ue_lo_mask);
3181 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3182
3183 /* On certain platforms BE hardware can indicate spurious UEs.
3184 * Allow HW to stop working completely in case of a real UE.
3185 * Hence not setting the hw_error for UE detection.
3186 */
f67ef7ba 3187
eb0eecc1 3188 if (ue_lo || ue_hi) {
3189 dev_err(dev,
3190 "Unrecoverable Error detected in the adapter");
3191 dev_err(dev, "Please reboot server to recover");
3192 if (skyhawk_chip(adapter))
3193 be_set_error(adapter, BE_ERROR_UE);
3194
3195 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3196 if (ue_lo & 1)
3197 dev_err(dev, "UE: %s bit set\n",
3198 ue_status_low_desc[i]);
3199 }
3200 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3201 if (ue_hi & 1)
3202 dev_err(dev, "UE: %s bit set\n",
3203 ue_status_hi_desc[i]);
3204 }
3205 }
3206 }
3207}
3208
3209static void be_msix_disable(struct be_adapter *adapter)
3210{
ac6a0c4a 3211 if (msix_enabled(adapter)) {
8d56ff11 3212 pci_disable_msix(adapter->pdev);
ac6a0c4a 3213 adapter->num_msix_vec = 0;
68d7bdcb 3214 adapter->num_msix_roce_vec = 0;
3215 }
3216}
3217
c2bba3df 3218static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3219{
7dc4c064 3220 int i, num_vec;
d379142b 3221 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3222
3223 /* If RoCE is supported, program the max number of NIC vectors that
3224 * may be configured via set-channels, along with vectors needed for
 3225	 * RoCE. Else, just program the number we'll use initially.
3226 */
3227 if (be_roce_supported(adapter))
3228 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3229 2 * num_online_cpus());
3230 else
3231 num_vec = adapter->cfg_num_qs;
3abcdeda 3232
ac6a0c4a 3233 for (i = 0; i < num_vec; i++)
3234 adapter->msix_entries[i].entry = i;
3235
3236 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3237 MIN_MSIX_VECTORS, num_vec);
3238 if (num_vec < 0)
3239 goto fail;
92bf14ab 3240
3241 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3242 adapter->num_msix_roce_vec = num_vec / 2;
3243 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3244 adapter->num_msix_roce_vec);
3245 }
3246
3247 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3248
3249 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3250 adapter->num_msix_vec);
c2bba3df 3251 return 0;
3252
3253fail:
3254 dev_warn(dev, "MSIx enable failed\n");
3255
3256 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3257 if (be_virtfn(adapter))
3258 return num_vec;
3259 return 0;
3260}
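/* Vector budget above, worked through (illustrative): on an 8-CPU
 * host with be_max_eqs() == 16 and RoCE supported,
 * num_vec = min(2 * 16, 2 * 8) = 16; if all 16 are granted, half
 * are reserved for RoCE and the NIC keeps the remaining 8.
 * Without RoCE, num_vec is simply adapter->cfg_num_qs.
 */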
3261
fe6d2a38 3262static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3263 struct be_eq_obj *eqo)
b628bde2 3264{
f2f781a7 3265 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3266}
6b7c5b94 3267
3268static int be_msix_register(struct be_adapter *adapter)
3269{
3270 struct net_device *netdev = adapter->netdev;
3271 struct be_eq_obj *eqo;
3272 int status, i, vec;
6b7c5b94 3273
3274 for_all_evt_queues(adapter, eqo, i) {
3275 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3276 vec = be_msix_vec_get(adapter, eqo);
3277 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3278 if (status)
3279 goto err_msix;
3280
3281 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3282 }
b628bde2 3283
6b7c5b94 3284 return 0;
3abcdeda 3285err_msix:
3286 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3287 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3288 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3289 status);
ac6a0c4a 3290 be_msix_disable(adapter);
3291 return status;
3292}
3293
3294static int be_irq_register(struct be_adapter *adapter)
3295{
3296 struct net_device *netdev = adapter->netdev;
3297 int status;
3298
ac6a0c4a 3299 if (msix_enabled(adapter)) {
6b7c5b94
SP
3300 status = be_msix_register(adapter);
3301 if (status == 0)
3302 goto done;
ba343c77 3303 /* INTx is not supported for VF */
18c57c74 3304 if (be_virtfn(adapter))
ba343c77 3305 return status;
6b7c5b94
SP
3306 }
3307
e49cc34f 3308 /* INTx: only the first EQ is used */
6b7c5b94
SP
3309 netdev->irq = adapter->pdev->irq;
3310 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3311 &adapter->eq_obj[0]);
6b7c5b94
SP
3312 if (status) {
3313 dev_err(&adapter->pdev->dev,
3314 "INTx request IRQ failed - err %d\n", status);
3315 return status;
3316 }
3317done:
3318 adapter->isr_registered = true;
3319 return 0;
3320}
3321
3322static void be_irq_unregister(struct be_adapter *adapter)
3323{
3324 struct net_device *netdev = adapter->netdev;
10ef9ab4 3325 struct be_eq_obj *eqo;
d658d98a 3326 int i, vec;
6b7c5b94
SP
3327
3328 if (!adapter->isr_registered)
3329 return;
3330
3331 /* INTx */
ac6a0c4a 3332 if (!msix_enabled(adapter)) {
e49cc34f 3333 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3334 goto done;
3335 }
3336
3337 /* MSIx */
d658d98a
PR
3338 for_all_evt_queues(adapter, eqo, i) {
3339 vec = be_msix_vec_get(adapter, eqo);
3340 irq_set_affinity_hint(vec, NULL);
3341 free_irq(vec, eqo);
3342 }
3abcdeda 3343
6b7c5b94
SP
3344done:
3345 adapter->isr_registered = false;
6b7c5b94
SP
3346}
3347
10ef9ab4 3348static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3349{
3350 struct be_queue_info *q;
3351 struct be_rx_obj *rxo;
3352 int i;
3353
3354 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q;
3356 if (q->created) {
3357 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3358 be_rx_cq_clean(rxo);
482c9e79 3359 }
10ef9ab4 3360 be_queue_free(adapter, q);
482c9e79
SP
3361 }
3362}
3363
889cd4b2
SP
3364static int be_close(struct net_device *netdev)
3365{
3366 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3367 struct be_eq_obj *eqo;
3368 int i;
889cd4b2 3369
e1ad8e33
KA
3370 /* This protection is needed as be_close() may be called even when the
3371 * adapter is in cleared state (after eeh perm failure)
3372 */
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0;
3375
045508a8
PP
3376 be_roce_dev_close(adapter);
3377
dff345c5
IV
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3379 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3380 napi_disable(&eqo->napi);
6384a4d0
SP
3381 be_disable_busy_poll(eqo);
3382 }
71237b6f 3383 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3384 }
a323d9bf
SP
3385
3386 be_async_mcc_disable(adapter);
3387
3388 /* Wait for all pending tx completions to arrive so that
3389 * all tx skbs are freed.
3390 */
fba87559 3391 netif_tx_disable(netdev);
6e1f9975 3392 be_tx_compl_clean(adapter);
a323d9bf
SP
3393
3394 be_rx_qs_destroy(adapter);
f66b7cfd 3395 be_clear_uc_list(adapter);
d11a347d 3396
a323d9bf 3397 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3398 if (msix_enabled(adapter))
3399 synchronize_irq(be_msix_vec_get(adapter, eqo));
3400 else
3401 synchronize_irq(netdev->irq);
3402 be_eq_clean(eqo);
63fcb27f
PR
3403 }
3404
889cd4b2
SP
3405 be_irq_unregister(adapter);
3406
482c9e79
SP
3407 return 0;
3408}
3409
10ef9ab4 3410static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3411{
1dcf7b1c
ED
3412 struct rss_info *rss = &adapter->rss_info;
3413 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3414 struct be_rx_obj *rxo;
e9008ee9 3415 int rc, i, j;
482c9e79
SP
3416
3417 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3418 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3419 sizeof(struct be_eth_rx_d));
3420 if (rc)
3421 return rc;
3422 }
3423
71bb8bd0
VV
3424 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3425 rxo = default_rxo(adapter);
3426 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3427 rx_frag_size, adapter->if_handle,
3428 false, &rxo->rss_id);
3429 if (rc)
3430 return rc;
3431 }
10ef9ab4
SP
3432
3433 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3434 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3435 rx_frag_size, adapter->if_handle,
3436 true, &rxo->rss_id);
482c9e79
SP
3437 if (rc)
3438 return rc;
3439 }
3440
3441 if (be_multi_rxq(adapter)) {
71bb8bd0 3442 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3443 for_all_rss_queues(adapter, rxo, i) {
e2557877 3444 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3445 break;
e2557877
VD
3446 rss->rsstable[j + i] = rxo->rss_id;
3447 rss->rss_queue[j + i] = i;
e9008ee9
PR
3448 }
3449 }
e2557877
VD
3450 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3451 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3452
3453 if (!BEx_chip(adapter))
e2557877
VD
3454 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3455 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3456 } else {
3457 /* Disable RSS if only the default RX Q is created */
e2557877 3458 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3459 }
594ad54a 3460
1dcf7b1c 3461 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3462 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3463 128, rss_key);
da1388d6 3464 if (rc) {
e2557877 3465 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3466 return rc;
482c9e79
SP
3467 }
3468
1dcf7b1c 3469 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3470
b02e60c8
SR
3471 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3472 * which is a queue empty condition
3473 */
10ef9ab4 3474 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3475 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3476
889cd4b2
SP
3477 return 0;
3478}
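/* Editor's sketch of the RSS indirection-table fill in be_rx_qs_create()
 * above: the rss_id of each RSS queue is written round-robin until all
 * RSS_INDIR_TABLE_LEN slots are used, so flows hash evenly across the
 * queues. Standalone version; the table length is hypothetical and the
 * queue index stands in for rxo->rss_id.
 */
#define DEMO_INDIR_LEN 128

static void demo_fill_rsstable(unsigned char *table, int num_rss_qs)
{
        int i, j;

        for (j = 0; j < DEMO_INDIR_LEN; j += num_rss_qs)
                for (i = 0; i < num_rss_qs && (j + i) < DEMO_INDIR_LEN; i++)
                        table[j + i] = (unsigned char)i;
}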
3479
6b7c5b94
SP
3480static int be_open(struct net_device *netdev)
3481{
3482 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3483 struct be_eq_obj *eqo;
3abcdeda 3484 struct be_rx_obj *rxo;
10ef9ab4 3485 struct be_tx_obj *txo;
b236916a 3486 u8 link_status;
3abcdeda 3487 int status, i;
5fb379ee 3488
10ef9ab4 3489 status = be_rx_qs_create(adapter);
482c9e79
SP
3490 if (status)
3491 goto err;
3492
c2bba3df
SK
3493 status = be_irq_register(adapter);
3494 if (status)
3495 goto err;
5fb379ee 3496
10ef9ab4 3497 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3498 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3499
10ef9ab4
SP
3500 for_all_tx_queues(adapter, txo, i)
3501 be_cq_notify(adapter, txo->cq.id, true, 0);
3502
7a1e9b20
SP
3503 be_async_mcc_enable(adapter);
3504
10ef9ab4
SP
3505 for_all_evt_queues(adapter, eqo, i) {
3506 napi_enable(&eqo->napi);
6384a4d0 3507 be_enable_busy_poll(eqo);
20947770 3508 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3509 }
04d3d624 3510 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3511
323ff71e 3512 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3513 if (!status)
3514 be_link_status_update(adapter, link_status);
3515
fba87559 3516 netif_tx_start_all_queues(netdev);
045508a8 3517 be_roce_dev_open(adapter);
c9c47142 3518
c5abe7c0 3519#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3520 if (skyhawk_chip(adapter))
3521 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3522#endif
3523
889cd4b2
SP
3524 return 0;
3525err:
3526 be_close(adapter->netdev);
3527 return -EIO;
5fb379ee
SP
3528}
3529
71d8d1b5
AK
3530static int be_setup_wol(struct be_adapter *adapter, bool enable)
3531{
3532 struct be_dma_mem cmd;
3533 int status = 0;
3534 u8 mac[ETH_ALEN];
3535
c7bf7169 3536 eth_zero_addr(mac);
71d8d1b5
AK
3537
3538 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3539 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3540 GFP_KERNEL);
ddf1169f 3541 if (!cmd.va)
6b568689 3542 return -ENOMEM;
71d8d1b5
AK
3543
3544 if (enable) {
3545 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3546 PCICFG_PM_CONTROL_OFFSET,
3547 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3548 if (status) {
3549 dev_err(&adapter->pdev->dev,
2381a55c 3550 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3551 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3552 cmd.dma);
71d8d1b5
AK
3553 return status;
3554 }
3555 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3556 adapter->netdev->dev_addr,
3557 &cmd);
71d8d1b5
AK
3558 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3559 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3560 } else {
3561 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3562 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3563 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3564 }
3565
2b7bcebf 3566 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3567 return status;
3568}
3569
f7062ee5
SP
3570static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3571{
3572 u32 addr;
3573
3574 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3575
3576 mac[5] = (u8)(addr & 0xFF);
3577 mac[4] = (u8)((addr >> 8) & 0xFF);
3578 mac[3] = (u8)((addr >> 16) & 0xFF);
3579 /* Use the OUI from the current MAC address */
3580 memcpy(mac, adapter->netdev->dev_addr, 3);
3581}
3582
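/* Editor's sketch of be_vf_eth_addr_generate() above, runnable outside
 * the kernel: a 32-bit hash of the PF MAC supplies the three low bytes,
 * the vendor OUI (first three bytes) is copied from the PF address, and
 * each VF then gets a consecutive address by bumping mac[5]. The hash
 * value here stands in for the jhash() result.
 */
#include <stdint.h>
#include <string.h>

static void demo_seed_mac(const uint8_t pf_mac[6], uint32_t hash,
                          uint8_t mac[6])
{
        mac[5] = (uint8_t)(hash & 0xFF);
        mac[4] = (uint8_t)((hash >> 8) & 0xFF);
        mac[3] = (uint8_t)((hash >> 16) & 0xFF);
        memcpy(mac, pf_mac, 3);         /* keep the vendor OUI */
}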
6d87f5c3
AK
3583/*
3584 * Generate a seed MAC address from the PF MAC Address using jhash.
3585 * MAC addresses for VFs are assigned incrementally starting from the seed.
3586 * These addresses are programmed in the ASIC by the PF and the VF driver
3587 * queries for the MAC address during its probe.
3588 */
4c876616 3589static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3590{
f9449ab7 3591 u32 vf;
3abcdeda 3592 int status = 0;
6d87f5c3 3593 u8 mac[ETH_ALEN];
11ac75ed 3594 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3595
3596 be_vf_eth_addr_generate(adapter, mac);
3597
11ac75ed 3598 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3599 if (BEx_chip(adapter))
590c391d 3600 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3601 vf_cfg->if_handle,
3602 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3603 else
3604 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3605 vf + 1);
590c391d 3606
6d87f5c3
AK
3607 if (status)
3608 dev_err(&adapter->pdev->dev,
748b539a
SP
3609 "Mac address assignment failed for VF %d\n",
3610 vf);
6d87f5c3 3611 else
11ac75ed 3612 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3613
3614 mac[5] += 1;
3615 }
3616 return status;
3617}
3618
4c876616
SP
3619static int be_vfs_mac_query(struct be_adapter *adapter)
3620{
3621 int status, vf;
3622 u8 mac[ETH_ALEN];
3623 struct be_vf_cfg *vf_cfg;
4c876616
SP
3624
3625 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3626 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3627 mac, vf_cfg->if_handle,
3628 false, vf+1);
4c876616
SP
3629 if (status)
3630 return status;
3631 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3632 }
3633 return 0;
3634}
3635
f9449ab7 3636static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3637{
11ac75ed 3638 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3639 u32 vf;
3640
257a3feb 3641 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3642 dev_warn(&adapter->pdev->dev,
3643 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3644 goto done;
3645 }
3646
b4c1df93
SP
3647 pci_disable_sriov(adapter->pdev);
3648
11ac75ed 3649 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3650 if (BEx_chip(adapter))
11ac75ed
SP
3651 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3652 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3653 else
3654 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3655 vf + 1);
f9449ab7 3656
11ac75ed
SP
3657 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3658 }
39f1d94d
SP
3659done:
3660 kfree(adapter->vf_cfg);
3661 adapter->num_vfs = 0;
f174c7ec 3662 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3663}
3664
7707133c
SP
3665static void be_clear_queues(struct be_adapter *adapter)
3666{
3667 be_mcc_queues_destroy(adapter);
3668 be_rx_cqs_destroy(adapter);
3669 be_tx_queues_destroy(adapter);
3670 be_evt_queues_destroy(adapter);
3671}
3672
68d7bdcb 3673static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3674{
191eb756
SP
3675 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3676 cancel_delayed_work_sync(&adapter->work);
3677 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3678 }
68d7bdcb
SP
3679}
3680
eb7dd46c
SP
3681static void be_cancel_err_detection(struct be_adapter *adapter)
3682{
3683 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3684 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3685 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3686 }
3687}
3688
b05004ad 3689static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3690{
b05004ad 3691 if (adapter->pmac_id) {
f66b7cfd
SP
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
b05004ad
SK
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
c5abe7c0 3699#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{
630f4b70
SB
3702 struct net_device *netdev = adapter->netdev;
3703
c9c47142
SP
3704 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3705 be_cmd_manage_iface(adapter, adapter->if_handle,
3706 OP_CONVERT_TUNNEL_TO_NORMAL);
3707
3708 if (adapter->vxlan_port)
3709 be_cmd_set_vxlan_port(adapter, 0);
3710
3711 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3712 adapter->vxlan_port = 0;
630f4b70
SB
3713
3714 netdev->hw_enc_features = 0;
3715 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3716 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3717}
c5abe7c0 3718#endif
c9c47142 3719
f2858738
VV
3720static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3721{
3722 struct be_resources res = adapter->pool_res;
3723 u16 num_vf_qs = 1;
3724
3725 /* Distribute the queue resources equally among the PF and its VFs.
3726 * Do not distribute queue resources in multi-channel configuration.
3727 */
3728 if (num_vfs && !be_is_mc(adapter)) {
3729 /* If the number of VFs requested is at least 8 less than the
3730 * max supported, assign 8 queue pairs to the PF and divide the
3731 * remaining resources evenly among the VFs
3732 */
3733 if (num_vfs < (be_max_vfs(adapter) - 8))
3734 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3735 else
3736 num_vf_qs = res.max_rss_qs / num_vfs;
3737
3738 /* Skyhawk-R chips support only MAX_RSS_IFACES RSS-capable
3739 * interfaces per port. Provide RSS on VFs only if the number
3740 * of VFs requested is below the MAX_RSS_IFACES limit.
3741 */
3742 if (num_vfs >= MAX_RSS_IFACES)
3743 num_vf_qs = 1;
3744 }
3745 return num_vf_qs;
3746}
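/* Editor's sketch of be_calculate_vf_qs() above with the checks folded
 * into one function; the MAX_RSS_IFACES value is hypothetical. The
 * driver computes the division first and then applies the
 * MAX_RSS_IFACES override, with the same result.
 */
#define DEMO_MAX_RSS_IFACES 15

static int demo_vf_qs(int max_rss_qs, int max_vfs, int num_vfs)
{
        if (!num_vfs || num_vfs >= DEMO_MAX_RSS_IFACES)
                return 1;
        if (num_vfs < max_vfs - 8)
                return (max_rss_qs - 8) / num_vfs;  /* PF keeps 8 pairs */
        return max_rss_qs / num_vfs;
}

/* demo_vf_qs(32, 30, 8) == 3: the PF keeps 8 queue pairs and each of
 * the 8 VFs gets (32 - 8) / 8 = 3.
 */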
3747
b05004ad
SK
3748static int be_clear(struct be_adapter *adapter)
3749{
f2858738
VV
3750 struct pci_dev *pdev = adapter->pdev;
3751 u16 num_vf_qs;
3752
68d7bdcb 3753 be_cancel_worker(adapter);
191eb756 3754
11ac75ed 3755 if (sriov_enabled(adapter))
f9449ab7
SP
3756 be_vf_clear(adapter);
3757
bec84e6b
VV
3758 /* Re-configure FW to distribute resources evenly across max-supported
3759 * number of VFs, only when VFs are not already enabled.
3760 */
ace40aff
VV
3761 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3762 !pci_vfs_assigned(pdev)) {
f2858738
VV
3763 num_vf_qs = be_calculate_vf_qs(adapter,
3764 pci_sriov_get_totalvfs(pdev));
bec84e6b 3765 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3766 pci_sriov_get_totalvfs(pdev),
3767 num_vf_qs);
3768 }
bec84e6b 3769
c5abe7c0 3770#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3771 be_disable_vxlan_offloads(adapter);
c5abe7c0 3772#endif
2d17f403 3773 /* delete the primary MAC along with the uc-mac list */
b05004ad 3774 be_mac_clear(adapter);
fbc13f01 3775
f9449ab7 3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3777
7707133c 3778 be_clear_queues(adapter);
a54769f5 3779
10ef9ab4 3780 be_msix_disable(adapter);
e1ad8e33 3781 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3782 return 0;
3783}
3784
0700d816
KA
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
0700d816
KA
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
0700d816
KA
3793
3794 en_flags &= cap_flags;
3795
435452aa 3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
0700d816
KA
3797}
3798
4c876616 3799static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3800{
92bf14ab 3801 struct be_resources res = {0};
4c876616 3802 struct be_vf_cfg *vf_cfg;
0700d816
KA
3803 u32 cap_flags, vf;
3804 int status;
abb93951 3805
0700d816 3806 /* If a FW profile exists, then cap_flags are updated */
4c876616 3807 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3808 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3809
4c876616 3810 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3811 if (!BE3_chip(adapter)) {
3812 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3813 RESOURCE_LIMITS,
92bf14ab 3814 vf + 1);
435452aa 3815 if (!status) {
92bf14ab 3816 cap_flags = res.if_cap_flags;
435452aa
VV
3817 /* Prevent VFs from enabling VLAN promiscuous
3818 * mode
3819 */
3820 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3821 }
92bf14ab 3822 }
4c876616 3823
0700d816
KA
3824 status = be_if_create(adapter, &vf_cfg->if_handle,
3825 cap_flags, vf + 1);
4c876616 3826 if (status)
0700d816 3827 return status;
4c876616 3828 }
0700d816
KA
3829
3830 return 0;
abb93951
PR
3831}
3832
39f1d94d 3833static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3834{
11ac75ed 3835 struct be_vf_cfg *vf_cfg;
30128031
SP
3836 int vf;
3837
39f1d94d
SP
3838 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3839 GFP_KERNEL);
3840 if (!adapter->vf_cfg)
3841 return -ENOMEM;
3842
11ac75ed
SP
3843 for_all_vfs(adapter, vf_cfg, vf) {
3844 vf_cfg->if_handle = -1;
3845 vf_cfg->pmac_id = -1;
30128031 3846 }
39f1d94d 3847 return 0;
30128031
SP
3848}
3849
f9449ab7
SP
3850static int be_vf_setup(struct be_adapter *adapter)
3851{
c502224e 3852 struct device *dev = &adapter->pdev->dev;
11ac75ed 3853 struct be_vf_cfg *vf_cfg;
4c876616 3854 int status, old_vfs, vf;
e7bcbd7b 3855 bool spoofchk;
39f1d94d 3856
257a3feb 3857 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3858
3859 status = be_vf_setup_init(adapter);
3860 if (status)
3861 goto err;
30128031 3862
4c876616
SP
3863 if (old_vfs) {
3864 for_all_vfs(adapter, vf_cfg, vf) {
3865 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3866 if (status)
3867 goto err;
3868 }
f9449ab7 3869
4c876616
SP
3870 status = be_vfs_mac_query(adapter);
3871 if (status)
3872 goto err;
3873 } else {
bec84e6b
VV
3874 status = be_vfs_if_create(adapter);
3875 if (status)
3876 goto err;
3877
39f1d94d
SP
3878 status = be_vf_eth_addr_config(adapter);
3879 if (status)
3880 goto err;
3881 }
f9449ab7 3882
11ac75ed 3883 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3884 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3885 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3886 vf + 1);
3887 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3888 status = be_cmd_set_fn_privileges(adapter,
435452aa 3889 vf_cfg->privileges |
04a06028
SP
3890 BE_PRIV_FILTMGMT,
3891 vf + 1);
435452aa
VV
3892 if (!status) {
3893 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3894 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3895 vf);
435452aa 3896 }
04a06028
SP
3897 }
3898
0f77ba73
RN
3899 /* Allow full available bandwidth */
3900 if (!old_vfs)
3901 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3902
e7bcbd7b
KA
3903 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3904 vf_cfg->if_handle, NULL,
3905 &spoofchk);
3906 if (!status)
3907 vf_cfg->spoofchk = spoofchk;
3908
bdce2ad7 3909 if (!old_vfs) {
0599863d 3910 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3911 be_cmd_set_logical_link_config(adapter,
3912 IFLA_VF_LINK_STATE_AUTO,
3913 vf+1);
3914 }
f9449ab7 3915 }
b4c1df93
SP
3916
3917 if (!old_vfs) {
3918 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3919 if (status) {
3920 dev_err(dev, "SRIOV enable failed\n");
3921 adapter->num_vfs = 0;
3922 goto err;
3923 }
3924 }
f174c7ec
VV
3925
3926 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3927 return 0;
3928err:
4c876616
SP
3929 dev_err(dev, "VF setup failed\n");
3930 be_vf_clear(adapter);
f9449ab7
SP
3931 return status;
3932}
3933
f93f160b
VV
3934/* Converting function_mode bits on BE3 to SH mc_type enums */
3935
3936static u8 be_convert_mc_type(u32 function_mode)
3937{
66064dbc 3938 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3939 return vNIC1;
66064dbc 3940 else if (function_mode & QNQ_MODE)
f93f160b
VV
3941 return FLEX10;
3942 else if (function_mode & VNIC_MODE)
3943 return vNIC2;
3944 else if (function_mode & UMC_ENABLED)
3945 return UMC;
3946 else
3947 return MC_NONE;
3948}
3949
92bf14ab
SP
3950/* On BE2/BE3 FW does not suggest the supported limits */
3951static void BEx_get_resources(struct be_adapter *adapter,
3952 struct be_resources *res)
3953{
bec84e6b 3954 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3955
3956 if (be_physfn(adapter))
3957 res->max_uc_mac = BE_UC_PMAC_COUNT;
3958 else
3959 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3960
f93f160b
VV
3961 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3962
3963 if (be_is_mc(adapter)) {
3964 /* Assuming that there are 4 channels per port,
3965 * when multi-channel is enabled
3966 */
3967 if (be_is_qnq_mode(adapter))
3968 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3969 else
3970 /* In a non-qnq multichannel mode, the pvid
3971 * takes up one vlan entry
3972 */
3973 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3974 } else {
92bf14ab 3975 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3976 }
3977
92bf14ab
SP
3978 res->max_mcast_mac = BE_MAX_MC;
3979
a5243dab
VV
3980 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3981 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3982 * *only* if it is RSS-capable.
3983 */
3984 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
3985 be_virtfn(adapter) ||
3986 (be_is_mc(adapter) &&
3987 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3988 res->max_tx_qs = 1;
a28277dc
SR
3989 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3990 struct be_resources super_nic_res = {0};
3991
3992 /* On a SuperNIC profile, the driver needs to use the
3993 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3994 */
f2858738
VV
3995 be_cmd_get_profile_config(adapter, &super_nic_res,
3996 RESOURCE_LIMITS, 0);
a28277dc
SR
3997 /* Some old versions of BE3 FW don't report max_tx_qs value */
3998 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3999 } else {
92bf14ab 4000 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4001 }
92bf14ab
SP
4002
4003 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4004 !use_sriov && be_physfn(adapter))
4005 res->max_rss_qs = (adapter->be3_native) ?
4006 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4007 res->max_rx_qs = res->max_rss_qs + 1;
4008
e3dc867c 4009 if (be_physfn(adapter))
d3518e21 4010 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4011 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4012 else
4013 res->max_evt_qs = 1;
92bf14ab
SP
4014
4015 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4016 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4017 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4018 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4019}
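/* Editor's sketch of the VLAN-budget math in BEx_get_resources() above,
 * assuming four channels per port: QnQ mode leaves an eighth of the
 * VLAN table to this function, and non-QnQ multi-channel mode loses one
 * extra entry to the pvid. The table size here is hypothetical.
 */
#define DEMO_NUM_VLANS 64

static int demo_max_vlans(int is_mc, int is_qnq)
{
        if (!is_mc)
                return DEMO_NUM_VLANS;
        return is_qnq ? DEMO_NUM_VLANS / 8 : (DEMO_NUM_VLANS / 4) - 1;
}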
4020
30128031
SP
4021static void be_setup_init(struct be_adapter *adapter)
4022{
4023 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4024 adapter->phy.link_speed = -1;
30128031
SP
4025 adapter->if_handle = -1;
4026 adapter->be3_native = false;
f66b7cfd 4027 adapter->if_flags = 0;
f25b119c
PR
4028 if (be_physfn(adapter))
4029 adapter->cmd_privileges = MAX_PRIVILEGES;
4030 else
4031 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4032}
4033
bec84e6b
VV
4034static int be_get_sriov_config(struct be_adapter *adapter)
4035{
bec84e6b 4036 struct be_resources res = {0};
d3d18312 4037 int max_vfs, old_vfs;
bec84e6b 4038
f2858738 4039 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4040
ace40aff 4041 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4042 if (BE3_chip(adapter) && !res.max_vfs) {
4043 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4044 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4045 }
4046
d3d18312 4047 adapter->pool_res = res;
bec84e6b 4048
ace40aff
VV
4049 /* If the VFs were not disabled during a previous unload of the driver,
4050 * then we cannot rely on the PF-pool limits for the TotalVFs value.
4051 * Instead use the TotalVFs value stored in the pci-dev struct.
4052 */
bec84e6b
VV
4053 old_vfs = pci_num_vf(adapter->pdev);
4054 if (old_vfs) {
ace40aff
VV
4055 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4056 old_vfs);
4057
4058 adapter->pool_res.max_vfs =
4059 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4060 adapter->num_vfs = old_vfs;
bec84e6b
VV
4061 }
4062
4063 return 0;
4064}
4065
ace40aff
VV
4066static void be_alloc_sriov_res(struct be_adapter *adapter)
4067{
4068 int old_vfs = pci_num_vf(adapter->pdev);
4069 u16 num_vf_qs;
4070 int status;
4071
4072 be_get_sriov_config(adapter);
4073
4074 if (!old_vfs)
4075 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4076
4077 /* When the HW is in an SRIOV-capable configuration, the PF-pool
4078 * resources are given to the PF during driver load, if there are no
4079 * old VFs. This facility is not available in BE3 FW.
4080 * Also, this is done by FW in Lancer chip.
4081 */
4082 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4083 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4084 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4085 num_vf_qs);
4086 if (status)
4087 dev_err(&adapter->pdev->dev,
4088 "Failed to optimize SRIOV resources\n");
4089 }
4090}
4091
92bf14ab 4092static int be_get_resources(struct be_adapter *adapter)
abb93951 4093{
92bf14ab
SP
4094 struct device *dev = &adapter->pdev->dev;
4095 struct be_resources res = {0};
4096 int status;
abb93951 4097
92bf14ab
SP
4098 if (BEx_chip(adapter)) {
4099 BEx_get_resources(adapter, &res);
4100 adapter->res = res;
abb93951
PR
4101 }
4102
92bf14ab
SP
4103 /* For Lancer, SH etc., read per-function resource limits from FW.
4104 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4105 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
4106 */
4107 if (!BEx_chip(adapter)) {
4108 status = be_cmd_get_func_config(adapter, &res);
4109 if (status)
4110 return status;
abb93951 4111
71bb8bd0
VV
4112 /* If a default RXQ must be created, we'll use up one RSS queue */
4113 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4114 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4115 res.max_rss_qs -= 1;
4116
92bf14ab
SP
4117 /* If RoCE may be enabled stash away half the EQs for RoCE */
4118 if (be_roce_supported(adapter))
4119 res.max_evt_qs /= 2;
4120 adapter->res = res;
abb93951 4121 }
4c876616 4122
71bb8bd0
VV
4123 /* If FW supports RSS default queue, then skip creating non-RSS
4124 * queue for non-IP traffic.
4125 */
4126 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4127 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4128
acbafeb1
SP
4129 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4130 be_max_txqs(adapter), be_max_rxqs(adapter),
4131 be_max_rss(adapter), be_max_eqs(adapter),
4132 be_max_vfs(adapter));
4133 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4134 be_max_uc(adapter), be_max_mc(adapter),
4135 be_max_vlans(adapter));
4136
ace40aff
VV
4137 /* Sanitize cfg_num_qs based on HW and platform limits */
4138 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4139 be_max_qs(adapter));
92bf14ab 4140 return 0;
abb93951
PR
4141}
4142
39f1d94d
SP
4143static int be_get_config(struct be_adapter *adapter)
4144{
6b085ba9 4145 int status, level;
542963b7 4146 u16 profile_id;
6b085ba9
SP
4147
4148 status = be_cmd_get_cntl_attributes(adapter);
4149 if (status)
4150 return status;
39f1d94d 4151
e97e3cda 4152 status = be_cmd_query_fw_cfg(adapter);
abb93951 4153 if (status)
92bf14ab 4154 return status;
abb93951 4155
6b085ba9
SP
4156 if (BEx_chip(adapter)) {
4157 level = be_cmd_get_fw_log_level(adapter);
4158 adapter->msg_enable =
4159 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4160 }
4161
4162 be_cmd_get_acpi_wol_cap(adapter);
4163
21252377
VV
4164 be_cmd_query_port_name(adapter);
4165
4166 if (be_physfn(adapter)) {
542963b7
VV
4167 status = be_cmd_get_active_profile(adapter, &profile_id);
4168 if (!status)
4169 dev_info(&adapter->pdev->dev,
4170 "Using profile 0x%x\n", profile_id);
962bcb75 4171 }
bec84e6b 4172
92bf14ab
SP
4173 status = be_get_resources(adapter);
4174 if (status)
4175 return status;
abb93951 4176
46ee9c14
RN
4177 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4178 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4179 if (!adapter->pmac_id)
4180 return -ENOMEM;
abb93951 4181
92bf14ab 4182 return 0;
39f1d94d
SP
4183}
4184
95046b92
SP
4185static int be_mac_setup(struct be_adapter *adapter)
4186{
4187 u8 mac[ETH_ALEN];
4188 int status;
4189
4190 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4191 status = be_cmd_get_perm_mac(adapter, mac);
4192 if (status)
4193 return status;
4194
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 }
4201
2c7a9dc1
AK
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
95046b92
SP
4206 return 0;
4207}
4208
68d7bdcb
SP
4209static void be_schedule_worker(struct be_adapter *adapter)
4210{
4211 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4212 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4213}
4214
eb7dd46c
SP
4215static void be_schedule_err_detection(struct be_adapter *adapter)
4216{
4217 schedule_delayed_work(&adapter->be_err_detection_work,
4218 msecs_to_jiffies(1000));
4219 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4220}
4221
7707133c 4222static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4223{
68d7bdcb 4224 struct net_device *netdev = adapter->netdev;
10ef9ab4 4225 int status;
ba343c77 4226
7707133c 4227 status = be_evt_queues_create(adapter);
abb93951
PR
4228 if (status)
4229 goto err;
73d540f2 4230
7707133c 4231 status = be_tx_qs_create(adapter);
c2bba3df
SK
4232 if (status)
4233 goto err;
10ef9ab4 4234
7707133c 4235 status = be_rx_cqs_create(adapter);
10ef9ab4 4236 if (status)
a54769f5 4237 goto err;
6b7c5b94 4238
7707133c 4239 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4240 if (status)
4241 goto err;
4242
68d7bdcb
SP
4243 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4244 if (status)
4245 goto err;
4246
4247 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4248 if (status)
4249 goto err;
4250
7707133c
SP
4251 return 0;
4252err:
4253 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4254 return status;
4255}
4256
68d7bdcb
SP
4257int be_update_queues(struct be_adapter *adapter)
4258{
4259 struct net_device *netdev = adapter->netdev;
4260 int status;
4261
4262 if (netif_running(netdev))
4263 be_close(netdev);
4264
4265 be_cancel_worker(adapter);
4266
4267 /* If any vectors have been shared with RoCE we cannot re-program
4268 * the MSIx table.
4269 */
4270 if (!adapter->num_msix_roce_vec)
4271 be_msix_disable(adapter);
4272
4273 be_clear_queues(adapter);
4274
4275 if (!msix_enabled(adapter)) {
4276 status = be_msix_enable(adapter);
4277 if (status)
4278 return status;
4279 }
4280
4281 status = be_setup_queues(adapter);
4282 if (status)
4283 return status;
4284
4285 be_schedule_worker(adapter);
4286
4287 if (netif_running(netdev))
4288 status = be_open(netdev);
4289
4290 return status;
4291}
4292
f7062ee5
SP
4293static inline int fw_major_num(const char *fw_ver)
4294{
4295 int fw_major = 0, i;
4296
4297 i = sscanf(fw_ver, "%d.", &fw_major);
4298 if (i != 1)
4299 return 0;
4300
4301 return fw_major;
4302}
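/* Editor's note on fw_major_num() above: sscanf() reports the number of
 * successful conversions, so any version string beginning with an
 * integer yields that integer and anything else yields 0, which the
 * check in be_setup() treats as old/unknown firmware. A minimal usage
 * example:
 */
#include <assert.h>

static void demo_fw_major_usage(void)
{
        assert(fw_major_num("4.6.62.0") == 4);
        assert(fw_major_num("bad-version") == 0);
}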
4303
f962f840
SP
4304/* If any VFs are already enabled, don't FLR the PF */
4305static bool be_reset_required(struct be_adapter *adapter)
4306{
4307 return pci_num_vf(adapter->pdev) ? false : true;
4308}
4309
4310/* Wait for the FW to be ready and perform the required initialization */
4311static int be_func_init(struct be_adapter *adapter)
4312{
4313 int status;
4314
4315 status = be_fw_wait_ready(adapter);
4316 if (status)
4317 return status;
4318
4319 if (be_reset_required(adapter)) {
4320 status = be_cmd_reset_function(adapter);
4321 if (status)
4322 return status;
4323
4324 /* Wait for interrupts to quiesce after an FLR */
4325 msleep(100);
4326
4327 /* We can clear all errors when function reset succeeds */
954f6825 4328 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4329 }
4330
4331 /* Tell FW we're ready to fire cmds */
4332 status = be_cmd_fw_init(adapter);
4333 if (status)
4334 return status;
4335
4336 /* Allow interrupts for other ULPs running on NIC function */
4337 be_intr_set(adapter, true);
4338
4339 return 0;
4340}
4341
7707133c
SP
4342static int be_setup(struct be_adapter *adapter)
4343{
4344 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4345 int status;
4346
f962f840
SP
4347 status = be_func_init(adapter);
4348 if (status)
4349 return status;
4350
7707133c
SP
4351 be_setup_init(adapter);
4352
4353 if (!lancer_chip(adapter))
4354 be_cmd_req_native_mode(adapter);
4355
ace40aff
VV
4356 if (!BE2_chip(adapter) && be_physfn(adapter))
4357 be_alloc_sriov_res(adapter);
4358
7707133c 4359 status = be_get_config(adapter);
10ef9ab4 4360 if (status)
a54769f5 4361 goto err;
6b7c5b94 4362
7707133c 4363 status = be_msix_enable(adapter);
10ef9ab4 4364 if (status)
a54769f5 4365 goto err;
6b7c5b94 4366
0700d816
KA
4367 status = be_if_create(adapter, &adapter->if_handle,
4368 be_if_cap_flags(adapter), 0);
7707133c 4369 if (status)
a54769f5 4370 goto err;
6b7c5b94 4371
68d7bdcb
SP
4372 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4373 rtnl_lock();
7707133c 4374 status = be_setup_queues(adapter);
68d7bdcb 4375 rtnl_unlock();
95046b92 4376 if (status)
1578e777
PR
4377 goto err;
4378
7707133c 4379 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4380
4381 status = be_mac_setup(adapter);
10ef9ab4
SP
4382 if (status)
4383 goto err;
4384
e97e3cda 4385 be_cmd_get_fw_ver(adapter);
acbafeb1 4386 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4387
e9e2a904 4388 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4389 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4390 adapter->fw_ver);
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 }
4393
1d1e9a46 4394 if (adapter->vlans_added)
10329df8 4395 be_vid_config(adapter);
7ab8b0b4 4396
a54769f5 4397 be_set_rx_mode(adapter->netdev);
5fb379ee 4398
00d594c3
KA
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc);
4401 if (status)
4402 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4403 &adapter->rx_fc);
590c391d 4404
00d594c3
KA
4405 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4406 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4407
bdce2ad7
SR
4408 if (be_physfn(adapter))
4409 be_cmd_set_logical_link_config(adapter,
4410 IFLA_VF_LINK_STATE_AUTO, 0);
4411
bec84e6b
VV
4412 if (adapter->num_vfs)
4413 be_vf_setup(adapter);
f9449ab7 4414
f25b119c
PR
4415 status = be_cmd_get_phy_info(adapter);
4416 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4417 adapter->phy.fc_autoneg = 1;
4418
68d7bdcb 4419 be_schedule_worker(adapter);
e1ad8e33 4420 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4421 return 0;
a54769f5
SP
4422err:
4423 be_clear(adapter);
4424 return status;
4425}
6b7c5b94 4426
66268739
IV
4427#ifdef CONFIG_NET_POLL_CONTROLLER
4428static void be_netpoll(struct net_device *netdev)
4429{
4430 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4431 struct be_eq_obj *eqo;
66268739
IV
4432 int i;
4433
e49cc34f 4434 for_all_evt_queues(adapter, eqo, i) {
20947770 4435 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4436 napi_schedule(&eqo->napi);
4437 }
66268739
IV
4438}
4439#endif
4440
96c9b2e4 4441static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4442
306f1348
SP
4443static bool phy_flashing_required(struct be_adapter *adapter)
4444{
e02cfd96 4445 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4446 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4447}
4448
c165541e
PR
4449static bool is_comp_in_ufi(struct be_adapter *adapter,
4450 struct flash_section_info *fsec, int type)
4451{
4452 int i = 0, img_type = 0;
4453 struct flash_section_info_g2 *fsec_g2 = NULL;
4454
ca34fe38 4455 if (BE2_chip(adapter))
c165541e
PR
4456 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4457
4458 for (i = 0; i < MAX_FLASH_COMP; i++) {
4459 if (fsec_g2)
4460 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4461 else
4462 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4463
4464 if (img_type == type)
4465 return true;
4466 }
4467 return false;
4468
4469}
4470
4188e7df 4471static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4472 int header_size,
4473 const struct firmware *fw)
c165541e
PR
4474{
4475 struct flash_section_info *fsec = NULL;
4476 const u8 *p = fw->data;
4477
4478 p += header_size;
4479 while (p < (fw->data + fw->size)) {
4480 fsec = (struct flash_section_info *)p;
4481 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4482 return fsec;
4483 p += 32;
4484 }
4485 return NULL;
4486}
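/* Editor's sketch of get_fsec_info() above, over a plain buffer: the
 * flash section header is found by sliding a window over the image in
 * 32-byte steps and comparing against the split cookie string.
 */
#include <stddef.h>
#include <string.h>

static const void *demo_find_cookie(const void *buf, size_t len,
                                    const void *cookie, size_t cookie_len)
{
        const unsigned char *p = buf;
        const unsigned char *end = p + len;

        for (; p + cookie_len <= end; p += 32)
                if (!memcmp(p, cookie, cookie_len))
                        return p;
        return NULL;
}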
4487
96c9b2e4
VV
4488static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4489 u32 img_offset, u32 img_size, int hdr_size,
4490 u16 img_optype, bool *crc_match)
4491{
4492 u32 crc_offset;
4493 int status;
4494 u8 crc[4];
4495
70a7b525
VV
4496 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4497 img_size - 4);
96c9b2e4
VV
4498 if (status)
4499 return status;
4500
4501 crc_offset = hdr_size + img_offset + img_size - 4;
4502
4503 /* Skip flashing if the CRC of the flashed region matches */
4504 if (!memcmp(crc, p + crc_offset, 4))
4505 *crc_match = true;
4506 else
4507 *crc_match = false;
4508
4509 return status;
4510}
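/* Editor's sketch of the offset math in be_check_flash_crc() above:
 * each flash component stores its CRC in its own last four bytes, so
 * the file-side copy lives at hdr_size + img_offset + img_size - 4 and
 * is compared with the CRC the firmware reports for the region already
 * in flash.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool demo_crc_match(const uint8_t *fw_data, uint32_t hdr_size,
                           uint32_t img_offset, uint32_t img_size,
                           const uint8_t flashed_crc[4])
{
        const uint8_t *file_crc = fw_data + hdr_size + img_offset +
                                  img_size - 4;

        return memcmp(flashed_crc, file_crc, 4) == 0;
}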
4511
773a2d7c 4512static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4513 struct be_dma_mem *flash_cmd, int optype, int img_size,
4514 u32 img_offset)
773a2d7c 4515{
70a7b525 4516 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4517 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4518 int status;
773a2d7c 4519
773a2d7c
PR
4520 while (total_bytes) {
4521 num_bytes = min_t(u32, 32*1024, total_bytes);
4522
4523 total_bytes -= num_bytes;
4524
4525 if (!total_bytes) {
4526 if (optype == OPTYPE_PHY_FW)
4527 flash_op = FLASHROM_OPER_PHY_FLASH;
4528 else
4529 flash_op = FLASHROM_OPER_FLASH;
4530 } else {
4531 if (optype == OPTYPE_PHY_FW)
4532 flash_op = FLASHROM_OPER_PHY_SAVE;
4533 else
4534 flash_op = FLASHROM_OPER_SAVE;
4535 }
4536
be716446 4537 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4538 img += num_bytes;
4539 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4540 flash_op, img_offset +
4541 bytes_sent, num_bytes);
4c60005f 4542 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4543 optype == OPTYPE_PHY_FW)
4544 break;
4545 else if (status)
773a2d7c 4546 return status;
70a7b525
VV
4547
4548 bytes_sent += num_bytes;
773a2d7c
PR
4549 }
4550 return 0;
4551}
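/* Editor's sketch of the chunking scheme in be_flash() above: every
 * 32KB slice except the last is sent with a "save" opcode and the final
 * slice with a "flash" (commit) opcode, so the device programs the
 * region only once the whole image has arrived. Opcode names and the
 * send_chunk() hook are placeholders.
 */
#include <stdint.h>

enum demo_op { DEMO_OP_SAVE, DEMO_OP_FLASH };

static void demo_flash_image(const uint8_t *img, uint32_t total,
                             uint32_t chunk)
{
        uint32_t sent = 0;

        while (sent < total) {
                uint32_t n = (total - sent < chunk) ? total - sent : chunk;
                enum demo_op op = (sent + n == total) ? DEMO_OP_FLASH
                                                      : DEMO_OP_SAVE;

                /* send_chunk(img + sent, n, op); -- hypothetical hook */
                sent += n;
        }
}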
4552
0ad3157e 4553/* For BE2, BE3 and BE3-R */
ca34fe38 4554static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4555 const struct firmware *fw,
4556 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4557{
c165541e 4558 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4559 struct device *dev = &adapter->pdev->dev;
c165541e 4560 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4561 int status, i, filehdr_size, num_comp;
4562 const struct flash_comp *pflashcomp;
4563 bool crc_match;
4564 const u8 *p;
c165541e
PR
4565
4566 struct flash_comp gen3_flash_types[] = {
4567 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4568 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4569 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4570 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4571 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4572 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4573 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4574 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4575 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4576 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4577 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4578 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4579 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4580 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4581 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4582 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4583 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4584 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4585 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4586 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4587 };
c165541e
PR
4588
4589 struct flash_comp gen2_flash_types[] = {
4590 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4591 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4592 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4593 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4594 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4595 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4596 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4597 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4598 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4599 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4600 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4601 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4602 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4603 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4604 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4605 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4606 };
4607
ca34fe38 4608 if (BE3_chip(adapter)) {
3f0d4560
AK
4609 pflashcomp = gen3_flash_types;
4610 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4611 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4612 } else {
4613 pflashcomp = gen2_flash_types;
4614 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4615 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4616 img_hdrs_size = 0;
84517482 4617 }
ca34fe38 4618
c165541e
PR
4619 /* Get flash section info */
4620 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4621 if (!fsec) {
96c9b2e4 4622 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4623 return -1;
4624 }
9fe96934 4625 for (i = 0; i < num_comp; i++) {
c165541e 4626 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4627 continue;
c165541e
PR
4628
4629 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4630 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4631 continue;
4632
773a2d7c
PR
4633 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4634 !phy_flashing_required(adapter))
306f1348 4635 continue;
c165541e 4636
773a2d7c 4637 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4638 status = be_check_flash_crc(adapter, fw->data,
4639 pflashcomp[i].offset,
4640 pflashcomp[i].size,
4641 filehdr_size +
4642 img_hdrs_size,
4643 OPTYPE_REDBOOT, &crc_match);
4644 if (status) {
4645 dev_err(dev,
4646 "Could not get CRC for 0x%x region\n",
4647 pflashcomp[i].optype);
4648 continue;
4649 }
4650
4651 if (crc_match)
773a2d7c
PR
4652 continue;
4653 }
c165541e 4654
96c9b2e4
VV
4655 p = fw->data + filehdr_size + pflashcomp[i].offset +
4656 img_hdrs_size;
306f1348
SP
4657 if (p + pflashcomp[i].size > fw->data + fw->size)
4658 return -1;
773a2d7c
PR
4659
4660 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4661 pflashcomp[i].size, 0);
773a2d7c 4662 if (status) {
96c9b2e4 4663 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4664 pflashcomp[i].img_type);
4665 return status;
84517482 4666 }
84517482 4667 }
84517482
AK
4668 return 0;
4669}
4670
96c9b2e4
VV
4671static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4672{
4673 u32 img_type = le32_to_cpu(fsec_entry.type);
4674 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4675
4676 if (img_optype != 0xFFFF)
4677 return img_optype;
4678
4679 switch (img_type) {
4680 case IMAGE_FIRMWARE_iSCSI:
4681 img_optype = OPTYPE_ISCSI_ACTIVE;
4682 break;
4683 case IMAGE_BOOT_CODE:
4684 img_optype = OPTYPE_REDBOOT;
4685 break;
4686 case IMAGE_OPTION_ROM_ISCSI:
4687 img_optype = OPTYPE_BIOS;
4688 break;
4689 case IMAGE_OPTION_ROM_PXE:
4690 img_optype = OPTYPE_PXE_BIOS;
4691 break;
4692 case IMAGE_OPTION_ROM_FCoE:
4693 img_optype = OPTYPE_FCOE_BIOS;
4694 break;
4695 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4696 img_optype = OPTYPE_ISCSI_BACKUP;
4697 break;
4698 case IMAGE_NCSI:
4699 img_optype = OPTYPE_NCSI_FW;
4700 break;
4701 case IMAGE_FLASHISM_JUMPVECTOR:
4702 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4703 break;
4704 case IMAGE_FIRMWARE_PHY:
4705 img_optype = OPTYPE_SH_PHY_FW;
4706 break;
4707 case IMAGE_REDBOOT_DIR:
4708 img_optype = OPTYPE_REDBOOT_DIR;
4709 break;
4710 case IMAGE_REDBOOT_CONFIG:
4711 img_optype = OPTYPE_REDBOOT_CONFIG;
4712 break;
4713 case IMAGE_UFI_DIR:
4714 img_optype = OPTYPE_UFI_DIR;
4715 break;
4716 default:
4717 break;
4718 }
4719
4720 return img_optype;
4721}
4722
773a2d7c 4723static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4724 const struct firmware *fw,
4725 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4726{
773a2d7c 4727 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4728 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4729 struct device *dev = &adapter->pdev->dev;
773a2d7c 4730 struct flash_section_info *fsec = NULL;
96c9b2e4 4731 u32 img_offset, img_size, img_type;
70a7b525 4732 u16 img_optype, flash_optype;
96c9b2e4 4733 int status, i, filehdr_size;
96c9b2e4 4734 const u8 *p;
773a2d7c
PR
4735
4736 filehdr_size = sizeof(struct flash_file_hdr_g3);
4737 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4738 if (!fsec) {
96c9b2e4 4739 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4740 return -EINVAL;
773a2d7c
PR
4741 }
4742
70a7b525 4743retry_flash:
773a2d7c
PR
4744 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4745 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4746 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4747 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4748 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4749 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4750
96c9b2e4 4751 if (img_optype == 0xFFFF)
773a2d7c 4752 continue;
70a7b525
VV
4753
4754 if (flash_offset_support)
4755 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4756 else
4757 flash_optype = img_optype;
4758
96c9b2e4
VV
4759 /* Don't bother verifying CRC if an old FW image is being
4760 * flashed
4761 */
4762 if (old_fw_img)
4763 goto flash;
4764
4765 status = be_check_flash_crc(adapter, fw->data, img_offset,
4766 img_size, filehdr_size +
70a7b525 4767 img_hdrs_size, flash_optype,
96c9b2e4 4768 &crc_match);
4c60005f
KA
4769 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4770 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4771 /* The current FW image on the card does not support
4772 * OFFSET based flashing. Retry using older mechanism
4773 * of OPTYPE based flashing
4774 */
4775 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4776 flash_offset_support = false;
4777 goto retry_flash;
4778 }
4779
4780 /* The current FW image on the card does not recognize
4781 * the new FLASH op_type. The FW download is partially
4782 * complete. Reboot the server now to enable FW image
4783 * to recognize the new FLASH op_type. To complete the
4784 * remaining process, download the same FW again after
4785 * the reboot.
4786 */
96c9b2e4
VV
4787 dev_err(dev, "Flash incomplete. Reset the server\n");
4788 dev_err(dev, "Download FW image again after reset\n");
4789 return -EAGAIN;
4790 } else if (status) {
4791 dev_err(dev, "Could not get CRC for 0x%x region\n",
4792 img_optype);
4793 return -EFAULT;
773a2d7c
PR
4794 }
4795
96c9b2e4
VV
4796 if (crc_match)
4797 continue;
773a2d7c 4798
96c9b2e4
VV
4799flash:
4800 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4801 if (p + img_size > fw->data + fw->size)
4802 return -1;
4803
70a7b525
VV
4804 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4805 img_offset);
4806
4807 /* The current FW image on the card does not support OFFSET
4808 * based flashing. Retry using older mechanism of OPTYPE based
4809 * flashing
4810 */
4811 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4812 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4813 flash_offset_support = false;
4814 goto retry_flash;
4815 }
4816
96c9b2e4
VV
4817 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4818 * UFI_DIR region
4819 */
4c60005f
KA
4820 if (old_fw_img &&
4821 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4822 (img_optype == OPTYPE_UFI_DIR &&
4823 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4824 continue;
4825 } else if (status) {
4826 dev_err(dev, "Flashing section type 0x%x failed\n",
4827 img_type);
4828 return -EFAULT;
773a2d7c
PR
4829 }
4830 }
4831 return 0;
3f0d4560
AK
4832}
4833
485bf569 4834static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4835 const struct firmware *fw)
84517482 4836{
485bf569
SN
4837#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4838#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4839 struct device *dev = &adapter->pdev->dev;
84517482 4840 struct be_dma_mem flash_cmd;
485bf569
SN
4841 const u8 *data_ptr = NULL;
4842 u8 *dest_image_ptr = NULL;
4843 size_t image_size = 0;
4844 u32 chunk_size = 0;
4845 u32 data_written = 0;
4846 u32 offset = 0;
4847 int status = 0;
4848 u8 add_status = 0;
f67ef7ba 4849 u8 change_status;
84517482 4850
485bf569 4851 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4852 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4853 return -EINVAL;
d9efd2af
SB
4854 }
4855
485bf569
SN
4856 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4857 + LANCER_FW_DOWNLOAD_CHUNK;
e51000db
SB
4858 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4859 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4860 if (!flash_cmd.va)
4861 return -ENOMEM;
84517482 4862
485bf569
SN
4863 dest_image_ptr = flash_cmd.va +
4864 sizeof(struct lancer_cmd_req_write_object);
4865 image_size = fw->size;
4866 data_ptr = fw->data;
4867
4868 while (image_size) {
4869 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4870
4871 /* Copy the image chunk content. */
4872 memcpy(dest_image_ptr, data_ptr, chunk_size);
4873
4874 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4875 chunk_size, offset,
4876 LANCER_FW_DOWNLOAD_LOCATION,
4877 &data_written, &change_status,
4878 &add_status);
485bf569
SN
4879 if (status)
4880 break;
4881
4882 offset += data_written;
4883 data_ptr += data_written;
4884 image_size -= data_written;
4885 }
4886
4887 if (!status) {
4888 /* Commit the written FW image */
4889 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4890 0, offset,
4891 LANCER_FW_DOWNLOAD_LOCATION,
4892 &data_written, &change_status,
4893 &add_status);
485bf569
SN
4894 }
4895
bb864e07 4896 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4897 if (status) {
bb864e07 4898 dev_err(dev, "Firmware load error\n");
3fb8cb80 4899 return be_cmd_status(status);
485bf569
SN
4900 }
4901
bb864e07
KA
4902 dev_info(dev, "Firmware flashed successfully\n");
4903
f67ef7ba 4904 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4905 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4906 status = lancer_physdev_ctrl(adapter,
4907 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4908 if (status) {
bb864e07
KA
4909 dev_err(dev, "Adapter busy, could not reset FW\n");
4910 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4911 }
4912 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4913 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4914 }
3fb8cb80
KA
4915
4916 return 0;
485bf569
SN
4917}
4918
a6e6ff6e
VV
4919/* Check if the flash image file is compatible with the adapter that
4920 * is being flashed.
4921 */
4922static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4923 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4924{
5d3acd0d
VV
4925 if (!fhdr) {
4926 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4927 return -1;
4928 }
773a2d7c 4929
5d3acd0d
VV
4930 /* First letter of the build version is used to identify
4931 * which chip this image file is meant for.
4932 */
4933 switch (fhdr->build[0]) {
4934 case BLD_STR_UFI_TYPE_SH:
a6e6ff6e
VV
4935 if (!skyhawk_chip(adapter))
4936 return false;
4937 break;
5d3acd0d 4938 case BLD_STR_UFI_TYPE_BE3:
a6e6ff6e
VV
4939 if (!BE3_chip(adapter))
4940 return false;
4941 break;
5d3acd0d 4942 case BLD_STR_UFI_TYPE_BE2:
a6e6ff6e
VV
4943 if (!BE2_chip(adapter))
4944 return false;
4945 break;
5d3acd0d
VV
4946 default:
4947 return false;
4948 }
a6e6ff6e
VV
4949
4950 return (fhdr->asic_type_rev >= adapter->asic_rev);
773a2d7c
PR
4951}
4952
485bf569
SN
4953static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4954{
5d3acd0d 4955 struct device *dev = &adapter->pdev->dev;
485bf569 4956 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4957 struct image_hdr *img_hdr_ptr;
4958 int status = 0, i, num_imgs;
485bf569 4959 struct be_dma_mem flash_cmd;
84517482 4960
5d3acd0d
VV
4961 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4962 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4963 dev_err(dev, "Flash image is not compatible with adapter\n");
4964 return -EINVAL;
84517482
AK
4965 }
4966
5d3acd0d 4967 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
e51000db
SB
4968 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4969 GFP_KERNEL);
5d3acd0d
VV
4970 if (!flash_cmd.va)
4971 return -ENOMEM;
773a2d7c 4972
773a2d7c
PR
4973 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4974 for (i = 0; i < num_imgs; i++) {
4975 img_hdr_ptr = (struct image_hdr *)(fw->data +
4976 (sizeof(struct flash_file_hdr_g3) +
4977 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4978 if (!BE2_chip(adapter) &&
4979 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4980 continue;
84517482 4981
5d3acd0d
VV
4982 if (skyhawk_chip(adapter))
4983 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4984 num_imgs);
4985 else
4986 status = be_flash_BEx(adapter, fw, &flash_cmd,
4987 num_imgs);
84517482
AK
4988 }
4989
5d3acd0d
VV
4990 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4991 if (!status)
4992 dev_info(dev, "Firmware flashed successfully\n");
84517482 4993
485bf569
SN
4994 return status;
4995}
4996
4997int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4998{
4999 const struct firmware *fw;
5000 int status;
5001
5002 if (!netif_running(adapter->netdev)) {
5003 dev_err(&adapter->pdev->dev,
5004 "Firmware load not allowed (interface is down)\n");
940a3fcd 5005 return -ENETDOWN;
485bf569
SN
5006 }
5007
5008 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5009 if (status)
5010 goto fw_exit;
5011
5012 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5013
5014 if (lancer_chip(adapter))
5015 status = lancer_fw_download(adapter, fw);
5016 else
5017 status = be_fw_download(adapter, fw);
5018
eeb65ced 5019 if (!status)
e97e3cda 5020 be_cmd_get_fw_ver(adapter);
eeb65ced 5021
84517482
AK
5022fw_exit:
5023 release_firmware(fw);
5024 return status;
5025}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
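
/* Usage sketch (assuming an iproute2 build that ships bridge(8)): the
 * IFLA_BRIDGE_MODE attribute parsed above is what
 * "bridge link set dev <pf-iface> hwmode {veb|vepa}" sends from
 * userspace; the command line is illustrative, not part of this driver.
 */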

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other
 * (non-VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads
 * for those other tunnels are unexported on the fly through
 * ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them until
 * all the tunnels are removed.
 */
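
/* Net effect of the counting in the two callbacks below (a summary
 * derived from this file, not an additional mechanism): the first add
 * enables offloads for that port; any further add bumps
 * vxlan_port_count and turns offloads off; deletes only decrement the
 * count, so offloads stay off until the count drains to zero and a
 * fresh port is added.
 */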

static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to keep working while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
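
/* Worked check from above, assuming the usual 8-byte struct udphdr and
 * 8-byte struct vxlanhdr: a frame only keeps its tunnel offloads when
 * the gap between the outer transport header and the inner MAC header
 * is exactly 8 + 8 = 16 bytes, i.e. a bare UDP+VxLAN encapsulation with
 * nothing else in between.
 */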
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
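
/* Timing note, derived from the code above: the worker re-arms itself
 * every 1000 ms, and be_get_temp_freq is initialized to 64 in
 * be_drv_init(), so the PF polls the die temperature roughly once every
 * 64 seconds.
 */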

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
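
/* BAR layout as implied by the mapping code above: CSR lives in BAR 2
 * (BEx PF only); the doorbell BAR is BAR 0 on Lancer and on VFs and
 * BAR 4 otherwise (see db_bar()); PCICFG is BAR 1 on BE2 and BAR 0 on
 * the other PF chips, while VFs reach it at a fixed offset from the
 * doorbell mapping instead of a separate BAR.
 */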

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;
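	/* Aside (an assumption, since the MODULO() helper is defined in
	 * be.h rather than in this file): the power-of-2 requirement
	 * suggests a mask-based modulo along the lines of "m & (n - 1)"
	 * guarded by a BUG_ON, which only matches m % n when n is a
	 * power of two.
	 */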

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
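
/* Usage sketch: once registered in be_probe(), this sensor surfaces as
 * a standard hwmon attribute, readable as, e.g.,
 * /sys/class/hwmon/hwmonN/temp1_input (N varies per system) or through
 * the lm-sensors "sensors" tool; the value is the die temperature in
 * millidegrees Celsius, per the conversion above.
 */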

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress can
	 * prevent recovery, so wait for the dump to finish. Only the
	 * first function needs to wait, as one wait per adapter is
	 * enough.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max number of VFs. The user may
	 * request only a subset of the max VFs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF gets a larger share of the resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
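
/* Usage sketch: this callback backs the standard PCI sysfs knob, so VFs
 * are enabled or disabled with, e.g.,
 * "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" (write 0 to
 * disable); <BDF> stands for the PF's PCI address and is illustrative.
 */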
5983
3646f0e5 5984static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5985 .error_detected = be_eeh_err_detected,
5986 .slot_reset = be_eeh_reset,
5987 .resume = be_eeh_resume,
5988};
5989
6b7c5b94
SP
5990static struct pci_driver be_driver = {
5991 .name = DRV_NAME,
5992 .id_table = be_dev_ids,
5993 .probe = be_probe,
5994 .remove = be_remove,
5995 .suspend = be_suspend,
484d76fd 5996 .resume = be_pci_resume,
82456b03 5997 .shutdown = be_shutdown,
ace40aff 5998 .sriov_configure = be_pci_sriov_configure,
cf588477 5999 .err_handler = &be_eeh_handlers
6b7c5b94
SP
6000};
6001
6002static int __init be_init_module(void)
6003{
8e95a202
JP
6004 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6005 rx_frag_size != 2048) {
6b7c5b94
SP
6006 printk(KERN_WARNING DRV_NAME
6007 " : Module param rx_frag_size must be 2048/4096/8192."
6008 " Using 2048\n");
6009 rx_frag_size = 2048;
6010 }
6b7c5b94 6011
ace40aff
VV
6012 if (num_vfs > 0) {
6013 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6014 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6015 }
6016
6b7c5b94
SP
6017 return pci_register_driver(&be_driver);
6018}
6019module_init(be_init_module);
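
/* Usage sketch: module parameters are supplied at load time, e.g.
 * "modprobe be2net rx_frag_size=4096"; the check above rejects any
 * rx_frag_size other than 2048/4096/8192 with a warning and falls back
 * to the 2048 default.
 */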

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);