/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

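/* Allocate a DMA-coherent ring of @len entries of @entry_size bytes each
 * and initialize the queue bookkeeping in @q.
 */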
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

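/* Toggle the host-interrupt-enable bit in the MEMBAR control register
 * through PCI config space; be_intr_set() falls back to this path when
 * the FW command fails.
 */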
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

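/* The notify helpers below post work to the HW by writing the queue id and
 * a count into the corresponding doorbell register; wmb() ensures the ring
 * entries are visible to the device before the doorbell rings.
 */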
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->dev_addr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);
	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

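/* Fold a wrapping 16-bit HW counter reading into a monotonic 32-bit
 * accumulator: the low word tracks the latest reading and 65536 is added
 * to the high word each time the reading wraps.
 */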
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

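/* ndo_get_stats64 handler: sums the per-queue SW counters (read under
 * u64_stats retry loops so 64-bit values are consistent on 32-bit hosts)
 * and folds in the HW error counters cached in adapter->drv_stats.
 */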
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

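/* Fill the per-packet header WRB: CRC/LSO/checksum-offload flags, VLAN tag,
 * total WRB count and the payload length of the packet that follows.
 */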
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

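/* DMA-map the skb head and frags and fill one WRB per mapping, plus the
 * header WRB and an optional dummy WRB. Returns the number of bytes mapped,
 * or 0 on a mapping error, in which case the queue head is rewound and all
 * mappings done so far are undone.
 */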
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

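/* Insert the VLAN tag(s) into the packet data itself instead of letting the
 * HW do it; used by the workarounds in be_xmit_workarounds() below.
 */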
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

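/* Apply all TX-path workarounds (short-packet padding, trimming of padded
 * IPv4 packets, manual VLAN insertion, ipv6 stall avoidance) before the skb
 * is handed to the HW. Returns the possibly-modified skb, or NULL if the
 * skb had to be dropped.
 */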
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

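/* Adaptive interrupt coalescing: once a second, recompute the EQ delay from
 * the observed RX packet rate and reprogram the EQ via FW if it changed.
 */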
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

10ef9ab4
SP
1580static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581 struct be_rx_compl_info *rxcp)
2e588f84
SP
1582{
1583 rxcp->pkt_size =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1588 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1589 rxcp->ip_csum =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591 rxcp->l4_csum =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593 rxcp->ipv6 =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595 rxcp->rxq_idx =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597 rxcp->num_rcvd =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599 rxcp->pkt_type =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1601 rxcp->rss_hash =
c297977e 1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1603 if (rxcp->vlanf) {
1604 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1605 compl);
1606 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607 compl);
15d72184 1608 }
12004ae9 1609 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1610 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1611 ip_frag, compl);
2e588f84
SP
1612}
1613
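/* be_rx_compl_get() below follows the usual valid-bit handshake with HW:
 * peek at the valid bit, rmb() before reading the rest of the entry,
 * parse it, clear the valid bit so the slot reads as empty once the ring
 * wraps, and only then advance the CQ tail.
 */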
1614static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1615{
1616 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1617 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1618 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1619
2e588f84
SP
 1620 /* For checking the valid bit it is OK to use either definition as the
 1621 * valid bit is at the same position in both v0 and v1 Rx compl */
1622 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1623 return NULL;
6b7c5b94 1624
2e588f84
SP
1625 rmb();
1626 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1627
2e588f84 1628 if (adapter->be3_native)
10ef9ab4 1629 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1630 else
10ef9ab4 1631 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1632
e38b1706
SK
1633 if (rxcp->ip_frag)
1634 rxcp->l4_csum = 0;
1635
15d72184
SP
1636 if (rxcp->vlanf) {
1637 /* vlanf could be wrongly set in some cards.
 1638 * Ignore it if vtm is not set */
752961a1 1639 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1640 rxcp->vlanf = 0;
6b7c5b94 1641
15d72184 1642 if (!lancer_chip(adapter))
3c709f8f 1643 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1644
939cf306 1645 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1646 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1647 rxcp->vlanf = 0;
1648 }
2e588f84
SP
1649
 1650 /* As the compl has been parsed, reset it; we won't touch it again */
1651 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1652
3abcdeda 1653 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1654 return rxcp;
1655}
1656
1829b086 1657static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1658{
6b7c5b94 1659 u32 order = get_order(size);
1829b086 1660
6b7c5b94 1661 if (order > 0)
1829b086
ED
1662 gfp |= __GFP_COMP;
1663 return alloc_pages(gfp, order);
6b7c5b94
SP
1664}
1665
1666/*
1667 * Allocate a page, split it to fragments of size rx_frag_size and post as
1668 * receive buffers to BE
1669 */
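/* A worked example, assuming 4K pages and the default rx_frag_size of 2048:
 * get_order(2048) == 0, so big_page_size == PAGE_SIZE and each page is
 * split into two 2K frags. The second frag takes a get_page() reference on
 * the same page, and last_page_user marks the frag after which the page
 * (and its DMA mapping) has no further users.
 */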
1829b086 1670static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1671{
3abcdeda 1672 struct be_adapter *adapter = rxo->adapter;
26d92f92 1673 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1674 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1675 struct page *pagep = NULL;
1676 struct be_eth_rx_d *rxd;
1677 u64 page_dmaaddr = 0, frag_dmaaddr;
1678 u32 posted, page_offset = 0;
1679
3abcdeda 1680 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1681 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1682 if (!pagep) {
1829b086 1683 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1684 if (unlikely(!pagep)) {
ac124ff9 1685 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1686 break;
1687 }
2b7bcebf
IV
1688 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1689 0, adapter->big_page_size,
1690 DMA_FROM_DEVICE);
6b7c5b94
SP
1691 page_info->page_offset = 0;
1692 } else {
1693 get_page(pagep);
1694 page_info->page_offset = page_offset + rx_frag_size;
1695 }
1696 page_offset = page_info->page_offset;
1697 page_info->page = pagep;
fac6da5b 1698 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1699 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1700
1701 rxd = queue_head_node(rxq);
1702 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1703 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1704
1705 /* Any space left in the current big page for another frag? */
1706 if ((page_offset + rx_frag_size + rx_frag_size) >
1707 adapter->big_page_size) {
1708 pagep = NULL;
1709 page_info->last_page_user = true;
1710 }
26d92f92
SP
1711
1712 prev_page_info = page_info;
1713 queue_head_inc(rxq);
10ef9ab4 1714 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1715 }
1716 if (pagep)
26d92f92 1717 prev_page_info->last_page_user = true;
6b7c5b94
SP
1718
1719 if (posted) {
6b7c5b94 1720 atomic_add(posted, &rxq->used);
8788fdc2 1721 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1722 } else if (atomic_read(&rxq->used) == 0) {
1723 /* Let be_worker replenish when memory is available */
3abcdeda 1724 rxo->rx_post_starved = true;
6b7c5b94 1725 }
6b7c5b94
SP
1726}
1727
5fb379ee 1728static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1729{
6b7c5b94
SP
1730 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1731
1732 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1733 return NULL;
1734
f3eb62d2 1735 rmb();
6b7c5b94
SP
1736 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1737
1738 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1739
1740 queue_tail_inc(tx_cq);
1741 return txcp;
1742}
1743
3c8def97
SP
1744static u16 be_tx_compl_process(struct be_adapter *adapter,
1745 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1746{
3c8def97 1747 struct be_queue_info *txq = &txo->q;
a73b796e 1748 struct be_eth_wrb *wrb;
3c8def97 1749 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1750 struct sk_buff *sent_skb;
ec43b1a6
SP
1751 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1752 bool unmap_skb_hdr = true;
6b7c5b94 1753
ec43b1a6 1754 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1755 BUG_ON(!sent_skb);
ec43b1a6
SP
1756 sent_skbs[txq->tail] = NULL;
1757
1758 /* skip header wrb */
a73b796e 1759 queue_tail_inc(txq);
6b7c5b94 1760
ec43b1a6 1761 do {
6b7c5b94 1762 cur_index = txq->tail;
a73b796e 1763 wrb = queue_tail_node(txq);
2b7bcebf
IV
1764 unmap_tx_frag(&adapter->pdev->dev, wrb,
1765 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1766 unmap_skb_hdr = false;
1767
6b7c5b94
SP
1768 num_wrbs++;
1769 queue_tail_inc(txq);
ec43b1a6 1770 } while (cur_index != last_index);
6b7c5b94 1771
6b7c5b94 1772 kfree_skb(sent_skb);
4d586b82 1773 return num_wrbs;
6b7c5b94
SP
1774}
1775
10ef9ab4
SP
1776/* Return the number of events in the event queue */
1777static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1778{
10ef9ab4
SP
1779 struct be_eq_entry *eqe;
1780 int num = 0;
859b1e4e 1781
10ef9ab4
SP
1782 do {
1783 eqe = queue_tail_node(&eqo->q);
1784 if (eqe->evt == 0)
1785 break;
859b1e4e 1786
10ef9ab4
SP
1787 rmb();
1788 eqe->evt = 0;
1789 num++;
1790 queue_tail_inc(&eqo->q);
1791 } while (true);
1792
1793 return num;
859b1e4e
SP
1794}
1795
10ef9ab4
SP
 1796 /* Leaves the EQ in disarmed state */
1797static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1798{
10ef9ab4 1799 int num = events_get(eqo);
859b1e4e 1800
10ef9ab4 1801 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1802}
1803
10ef9ab4 1804static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1805{
1806 struct be_rx_page_info *page_info;
3abcdeda
SP
1807 struct be_queue_info *rxq = &rxo->q;
1808 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1809 struct be_rx_compl_info *rxcp;
d23e946c
SP
1810 struct be_adapter *adapter = rxo->adapter;
1811 int flush_wait = 0;
6b7c5b94
SP
1812 u16 tail;
1813
d23e946c
SP
1814 /* Consume pending rx completions.
1815 * Wait for the flush completion (identified by zero num_rcvd)
1816 * to arrive. Notify CQ even when there are no more CQ entries
1817 * for HW to flush partially coalesced CQ entries.
1818 * In Lancer, there is no need to wait for flush compl.
1819 */
1820 for (;;) {
1821 rxcp = be_rx_compl_get(rxo);
1822 if (rxcp == NULL) {
1823 if (lancer_chip(adapter))
1824 break;
1825
1826 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1827 dev_warn(&adapter->pdev->dev,
1828 "did not receive flush compl\n");
1829 break;
1830 }
1831 be_cq_notify(adapter, rx_cq->id, true, 0);
1832 mdelay(1);
1833 } else {
1834 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1835 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1836 if (rxcp->num_rcvd == 0)
1837 break;
1838 }
6b7c5b94
SP
1839 }
1840
d23e946c
SP
1841 /* After cleanup, leave the CQ in unarmed state */
1842 be_cq_notify(adapter, rx_cq->id, false, 0);
1843
1844 /* Then free posted rx buffers that were not used */
6b7c5b94 1845 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1846 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1847 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1848 put_page(page_info->page);
1849 memset(page_info, 0, sizeof(*page_info));
1850 }
1851 BUG_ON(atomic_read(&rxq->used));
482c9e79 1852 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1853}
1854
0ae57bb3 1855static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1856{
0ae57bb3
SP
1857 struct be_tx_obj *txo;
1858 struct be_queue_info *txq;
a8e9179a 1859 struct be_eth_tx_compl *txcp;
4d586b82 1860 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1861 struct sk_buff *sent_skb;
1862 bool dummy_wrb;
0ae57bb3 1863 int i, pending_txqs;
a8e9179a
SP
1864
1865 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1866 do {
0ae57bb3
SP
1867 pending_txqs = adapter->num_tx_qs;
1868
1869 for_all_tx_queues(adapter, txo, i) {
1870 txq = &txo->q;
1871 while ((txcp = be_tx_compl_get(&txo->cq))) {
1872 end_idx =
1873 AMAP_GET_BITS(struct amap_eth_tx_compl,
1874 wrb_index, txcp);
1875 num_wrbs += be_tx_compl_process(adapter, txo,
1876 end_idx);
1877 cmpl++;
1878 }
1879 if (cmpl) {
1880 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1881 atomic_sub(num_wrbs, &txq->used);
1882 cmpl = 0;
1883 num_wrbs = 0;
1884 }
1885 if (atomic_read(&txq->used) == 0)
1886 pending_txqs--;
a8e9179a
SP
1887 }
1888
0ae57bb3 1889 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1890 break;
1891
1892 mdelay(1);
1893 } while (true);
1894
0ae57bb3
SP
1895 for_all_tx_queues(adapter, txo, i) {
1896 txq = &txo->q;
1897 if (atomic_read(&txq->used))
1898 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1899 atomic_read(&txq->used));
1900
1901 /* free posted tx for which compls will never arrive */
1902 while (atomic_read(&txq->used)) {
1903 sent_skb = txo->sent_skb_list[txq->tail];
1904 end_idx = txq->tail;
1905 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1906 &dummy_wrb);
1907 index_adv(&end_idx, num_wrbs - 1, txq->len);
1908 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1909 atomic_sub(num_wrbs, &txq->used);
1910 }
b03388d6 1911 }
6b7c5b94
SP
1912}
1913
10ef9ab4
SP
1914static void be_evt_queues_destroy(struct be_adapter *adapter)
1915{
1916 struct be_eq_obj *eqo;
1917 int i;
1918
1919 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1920 if (eqo->q.created) {
1921 be_eq_clean(eqo);
10ef9ab4 1922 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1923 }
10ef9ab4
SP
1924 be_queue_free(adapter, &eqo->q);
1925 }
1926}
1927
1928static int be_evt_queues_create(struct be_adapter *adapter)
1929{
1930 struct be_queue_info *eq;
1931 struct be_eq_obj *eqo;
1932 int i, rc;
1933
1934 adapter->num_evt_qs = num_irqs(adapter);
1935
1936 for_all_evt_queues(adapter, eqo, i) {
1937 eqo->adapter = adapter;
1938 eqo->tx_budget = BE_TX_BUDGET;
1939 eqo->idx = i;
1940 eqo->max_eqd = BE_MAX_EQD;
1941 eqo->enable_aic = true;
1942
1943 eq = &eqo->q;
1944 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1945 sizeof(struct be_eq_entry));
1946 if (rc)
1947 return rc;
1948
1949 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1950 if (rc)
1951 return rc;
1952 }
1cfafab9 1953 return 0;
10ef9ab4
SP
1954}
1955
5fb379ee
SP
1956static void be_mcc_queues_destroy(struct be_adapter *adapter)
1957{
1958 struct be_queue_info *q;
5fb379ee 1959
8788fdc2 1960 q = &adapter->mcc_obj.q;
5fb379ee 1961 if (q->created)
8788fdc2 1962 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1963 be_queue_free(adapter, q);
1964
8788fdc2 1965 q = &adapter->mcc_obj.cq;
5fb379ee 1966 if (q->created)
8788fdc2 1967 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1968 be_queue_free(adapter, q);
1969}
1970
1971/* Must be called only after TX qs are created as MCC shares TX EQ */
1972static int be_mcc_queues_create(struct be_adapter *adapter)
1973{
1974 struct be_queue_info *q, *cq;
5fb379ee 1975
8788fdc2 1976 cq = &adapter->mcc_obj.cq;
5fb379ee 1977 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1978 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1979 goto err;
1980
10ef9ab4
SP
1981 /* Use the default EQ for MCC completions */
1982 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1983 goto mcc_cq_free;
1984
8788fdc2 1985 q = &adapter->mcc_obj.q;
5fb379ee
SP
1986 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1987 goto mcc_cq_destroy;
1988
8788fdc2 1989 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1990 goto mcc_q_free;
1991
1992 return 0;
1993
1994mcc_q_free:
1995 be_queue_free(adapter, q);
1996mcc_cq_destroy:
8788fdc2 1997 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1998mcc_cq_free:
1999 be_queue_free(adapter, cq);
2000err:
2001 return -1;
2002}
2003
6b7c5b94
SP
2004static void be_tx_queues_destroy(struct be_adapter *adapter)
2005{
2006 struct be_queue_info *q;
3c8def97
SP
2007 struct be_tx_obj *txo;
2008 u8 i;
6b7c5b94 2009
3c8def97
SP
2010 for_all_tx_queues(adapter, txo, i) {
2011 q = &txo->q;
2012 if (q->created)
2013 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2014 be_queue_free(adapter, q);
6b7c5b94 2015
3c8def97
SP
2016 q = &txo->cq;
2017 if (q->created)
2018 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2019 be_queue_free(adapter, q);
2020 }
6b7c5b94
SP
2021}
2022
dafc0fe3
SP
2023static int be_num_txqs_want(struct be_adapter *adapter)
2024{
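	/* A single TXQ suffices when SR-IOV is desired on BEx chips, in
	 * multi-channel mode, on BEx VFs, and on BE2; otherwise use the
	 * FW-advertised maximum.
	 */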
abb93951
PR
2025 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2026 be_is_mc(adapter) ||
2027 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 2028 BE2_chip(adapter))
dafc0fe3
SP
2029 return 1;
2030 else
abb93951 2031 return adapter->max_tx_queues;
dafc0fe3
SP
2032}
2033
10ef9ab4 2034static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2035{
10ef9ab4
SP
2036 struct be_queue_info *cq, *eq;
2037 int status;
3c8def97
SP
2038 struct be_tx_obj *txo;
2039 u8 i;
6b7c5b94 2040
dafc0fe3 2041 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
2042 if (adapter->num_tx_qs != MAX_TX_QS) {
2043 rtnl_lock();
dafc0fe3
SP
2044 netif_set_real_num_tx_queues(adapter->netdev,
2045 adapter->num_tx_qs);
3bb62f4f
PR
2046 rtnl_unlock();
2047 }
dafc0fe3 2048
10ef9ab4
SP
2049 for_all_tx_queues(adapter, txo, i) {
2050 cq = &txo->cq;
2051 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2052 sizeof(struct be_eth_tx_compl));
2053 if (status)
2054 return status;
3c8def97 2055
10ef9ab4
SP
2056 /* If num_evt_qs is less than num_tx_qs, then more than
 2057 * one txq shares an eq
2058 */
2059 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2060 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2061 if (status)
2062 return status;
2063 }
2064 return 0;
2065}
6b7c5b94 2066
10ef9ab4
SP
2067static int be_tx_qs_create(struct be_adapter *adapter)
2068{
2069 struct be_tx_obj *txo;
2070 int i, status;
fe6d2a38 2071
3c8def97 2072 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
2073 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2074 sizeof(struct be_eth_wrb));
2075 if (status)
2076 return status;
6b7c5b94 2077
94d73aaa 2078 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2079 if (status)
2080 return status;
3c8def97 2081 }
6b7c5b94 2082
d379142b
SP
2083 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2084 adapter->num_tx_qs);
10ef9ab4 2085 return 0;
6b7c5b94
SP
2086}
2087
10ef9ab4 2088static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2089{
2090 struct be_queue_info *q;
3abcdeda
SP
2091 struct be_rx_obj *rxo;
2092 int i;
2093
2094 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2095 q = &rxo->cq;
2096 if (q->created)
2097 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2098 be_queue_free(adapter, q);
ac6a0c4a
SP
2099 }
2100}
2101
10ef9ab4 2102static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2103{
10ef9ab4 2104 struct be_queue_info *eq, *cq;
3abcdeda
SP
2105 struct be_rx_obj *rxo;
2106 int rc, i;
6b7c5b94 2107
10ef9ab4
SP
2108 /* We'll create as many RSS rings as there are irqs.
2109 * But when there's only one irq there's no use creating RSS rings
2110 */
2111 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2112 num_irqs(adapter) + 1 : 1;
7f640062
SP
2113 if (adapter->num_rx_qs != MAX_RX_QS) {
2114 rtnl_lock();
2115 netif_set_real_num_rx_queues(adapter->netdev,
2116 adapter->num_rx_qs);
2117 rtnl_unlock();
2118 }
ac6a0c4a 2119
6b7c5b94 2120 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2121 for_all_rx_queues(adapter, rxo, i) {
2122 rxo->adapter = adapter;
3abcdeda
SP
2123 cq = &rxo->cq;
2124 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2125 sizeof(struct be_eth_rx_compl));
2126 if (rc)
10ef9ab4 2127 return rc;
3abcdeda 2128
10ef9ab4
SP
2129 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2130 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2131 if (rc)
10ef9ab4 2132 return rc;
3abcdeda 2133 }
6b7c5b94 2134
d379142b
SP
2135 dev_info(&adapter->pdev->dev,
2136 "created %d RSS queue(s) and 1 default RX queue\n",
2137 adapter->num_rx_qs - 1);
10ef9ab4 2138 return 0;
b628bde2
SP
2139}
2140
6b7c5b94
SP
2141static irqreturn_t be_intx(int irq, void *dev)
2142{
e49cc34f
SP
2143 struct be_eq_obj *eqo = dev;
2144 struct be_adapter *adapter = eqo->adapter;
2145 int num_evts = 0;
6b7c5b94 2146
d0b9cec3
SP
2147 /* IRQ is not expected when NAPI is scheduled as the EQ
2148 * will not be armed.
2149 * But, this can happen on Lancer INTx where it takes
 2150 * a while to de-assert INTx or in BE2 where occasionally
2151 * an interrupt may be raised even when EQ is unarmed.
2152 * If NAPI is already scheduled, then counting & notifying
2153 * events will orphan them.
e49cc34f 2154 */
d0b9cec3 2155 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2156 num_evts = events_get(eqo);
d0b9cec3
SP
2157 __napi_schedule(&eqo->napi);
2158 if (num_evts)
2159 eqo->spurious_intr = 0;
2160 }
2161 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2162
d0b9cec3
SP
 2163 /* Return IRQ_HANDLED only for the first spurious intr
2164 * after a valid intr to stop the kernel from branding
2165 * this irq as a bad one!
e49cc34f 2166 */
d0b9cec3
SP
2167 if (num_evts || eqo->spurious_intr++ == 0)
2168 return IRQ_HANDLED;
2169 else
2170 return IRQ_NONE;
6b7c5b94
SP
2171}
2172
10ef9ab4 2173static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2174{
10ef9ab4 2175 struct be_eq_obj *eqo = dev;
6b7c5b94 2176
0b545a62
SP
2177 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2178 napi_schedule(&eqo->napi);
6b7c5b94
SP
2179 return IRQ_HANDLED;
2180}
2181
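/* GRO is attempted only for error-free TCP frames whose L4 checksum
 * verified correctly; everything else takes the regular receive path.
 */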
2e588f84 2182static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2183{
e38b1706 2184 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
6b7c5b94
SP
2185}
2186
10ef9ab4
SP
2187static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2188 int budget)
6b7c5b94 2189{
3abcdeda
SP
2190 struct be_adapter *adapter = rxo->adapter;
2191 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2192 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2193 u32 work_done;
2194
2195 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2196 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2197 if (!rxcp)
2198 break;
2199
12004ae9
SP
 2200 /* Is it a flush compl that has no data? */
2201 if (unlikely(rxcp->num_rcvd == 0))
2202 goto loop_continue;
2203
2204 /* Discard compl with partial DMA Lancer B0 */
2205 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2206 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2207 goto loop_continue;
2208 }
2209
2210 /* On BE drop pkts that arrive due to imperfect filtering in
 2211 * promiscuous mode on some SKUs
2212 */
2213 if (unlikely(rxcp->port != adapter->port_num &&
2214 !lancer_chip(adapter))) {
10ef9ab4 2215 be_rx_compl_discard(rxo, rxcp);
12004ae9 2216 goto loop_continue;
64642811 2217 }
009dd872 2218
12004ae9 2219 if (do_gro(rxcp))
10ef9ab4 2220 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2221 else
10ef9ab4 2222 be_rx_compl_process(rxo, rxcp);
12004ae9 2223loop_continue:
2e588f84 2224 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2225 }
2226
10ef9ab4
SP
2227 if (work_done) {
2228 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2229
10ef9ab4
SP
2230 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2231 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2232 }
10ef9ab4 2233
6b7c5b94
SP
2234 return work_done;
2235}
2236
10ef9ab4
SP
2237static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2238 int budget, int idx)
6b7c5b94 2239{
6b7c5b94 2240 struct be_eth_tx_compl *txcp;
10ef9ab4 2241 int num_wrbs = 0, work_done;
3c8def97 2242
10ef9ab4
SP
2243 for (work_done = 0; work_done < budget; work_done++) {
2244 txcp = be_tx_compl_get(&txo->cq);
2245 if (!txcp)
2246 break;
2247 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2248 AMAP_GET_BITS(struct amap_eth_tx_compl,
2249 wrb_index, txcp));
10ef9ab4 2250 }
6b7c5b94 2251
10ef9ab4
SP
2252 if (work_done) {
2253 be_cq_notify(adapter, txo->cq.id, true, work_done);
2254 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2255
10ef9ab4
SP
2256 /* As Tx wrbs have been freed up, wake up netdev queue
2257 * if it was stopped due to lack of tx wrbs. */
2258 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2259 atomic_read(&txo->q.used) < txo->q.len / 2) {
2260 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2261 }
10ef9ab4
SP
2262
2263 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2264 tx_stats(txo)->tx_compl += work_done;
2265 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2266 }
10ef9ab4
SP
2267 return (work_done < budget); /* Done */
2268}
6b7c5b94 2269
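/* be_poll() services every TX and RX queue that maps onto this EQ. If any
 * TXQ fails to drain within its budget, max_work is forced to the full
 * budget so that NAPI keeps polling instead of completing and re-arming
 * the EQ prematurely.
 */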
10ef9ab4
SP
2270int be_poll(struct napi_struct *napi, int budget)
2271{
2272 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2273 struct be_adapter *adapter = eqo->adapter;
0b545a62 2274 int max_work = 0, work, i, num_evts;
10ef9ab4 2275 bool tx_done;
f31e50a8 2276
0b545a62
SP
2277 num_evts = events_get(eqo);
2278
10ef9ab4
SP
2279 /* Process all TXQs serviced by this EQ */
2280 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2281 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2282 eqo->tx_budget, i);
2283 if (!tx_done)
2284 max_work = budget;
f31e50a8
SP
2285 }
2286
10ef9ab4
SP
2287 /* This loop will iterate twice for EQ0 in which
 2288 * completions of the last RXQ (default one) are also processed.
 2289 * For other EQs the loop iterates only once.
2290 */
2291 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2292 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2293 max_work = max(work, max_work);
2294 }
6b7c5b94 2295
10ef9ab4
SP
2296 if (is_mcc_eqo(eqo))
2297 be_process_mcc(adapter);
93c86700 2298
10ef9ab4
SP
2299 if (max_work < budget) {
2300 napi_complete(napi);
0b545a62 2301 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2302 } else {
2303 /* As we'll continue in polling mode, count and clear events */
0b545a62 2304 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2305 }
10ef9ab4 2306 return max_work;
6b7c5b94
SP
2307}
2308
f67ef7ba 2309void be_detect_error(struct be_adapter *adapter)
7c185276 2310{
e1cfb67a
PR
2311 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2312 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2313 u32 i;
2314
d23e946c 2315 if (be_hw_error(adapter))
72f02485
SP
2316 return;
2317
e1cfb67a
PR
2318 if (lancer_chip(adapter)) {
2319 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2320 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2321 sliport_err1 = ioread32(adapter->db +
2322 SLIPORT_ERROR1_OFFSET);
2323 sliport_err2 = ioread32(adapter->db +
2324 SLIPORT_ERROR2_OFFSET);
2325 }
2326 } else {
2327 pci_read_config_dword(adapter->pdev,
2328 PCICFG_UE_STATUS_LOW, &ue_lo);
2329 pci_read_config_dword(adapter->pdev,
2330 PCICFG_UE_STATUS_HIGH, &ue_hi);
2331 pci_read_config_dword(adapter->pdev,
2332 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2333 pci_read_config_dword(adapter->pdev,
2334 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2335
f67ef7ba
PR
2336 ue_lo = (ue_lo & ~ue_lo_mask);
2337 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2338 }
7c185276 2339
1451ae6e
AK
2340 /* On certain platforms BE hardware can indicate spurious UEs.
2341 * Allow the h/w to stop working completely in case of a real UE.
 2342 * Hence hw_error is not set for UE detection.
2343 */
2344 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2345 adapter->hw_error = true;
434b3648 2346 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2347 "Error detected in the card\n");
2348 }
2349
2350 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2351 dev_err(&adapter->pdev->dev,
2352 "ERR: sliport status 0x%x\n", sliport_status);
2353 dev_err(&adapter->pdev->dev,
2354 "ERR: sliport error1 0x%x\n", sliport_err1);
2355 dev_err(&adapter->pdev->dev,
2356 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2357 }
2358
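	/* Walk the unmasked UE bits and log the functional block behind
	 * every bit that is set.
	 */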
e1cfb67a
PR
2359 if (ue_lo) {
2360 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2361 if (ue_lo & 1)
7c185276
AK
2362 dev_err(&adapter->pdev->dev,
2363 "UE: %s bit set\n", ue_status_low_desc[i]);
2364 }
2365 }
f67ef7ba 2366
e1cfb67a
PR
2367 if (ue_hi) {
2368 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2369 if (ue_hi & 1)
7c185276
AK
2370 dev_err(&adapter->pdev->dev,
2371 "UE: %s bit set\n", ue_status_hi_desc[i]);
2372 }
2373 }
2374
2375}
2376
8d56ff11
SP
2377static void be_msix_disable(struct be_adapter *adapter)
2378{
ac6a0c4a 2379 if (msix_enabled(adapter)) {
8d56ff11 2380 pci_disable_msix(adapter->pdev);
ac6a0c4a 2381 adapter->num_msix_vec = 0;
3abcdeda
SP
2382 }
2383}
2384
10ef9ab4
SP
2385static uint be_num_rss_want(struct be_adapter *adapter)
2386{
30e80b55 2387 u32 num = 0;
abb93951 2388
10ef9ab4 2389 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2390 (lancer_chip(adapter) ||
2391 (!sriov_want(adapter) && be_physfn(adapter)))) {
2392 num = adapter->max_rss_queues;
30e80b55
YM
2393 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2394 }
2395 return num;
10ef9ab4
SP
2396}
2397
c2bba3df 2398static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2399{
10ef9ab4 2400#define BE_MIN_MSIX_VECTORS 1
045508a8 2401 int i, status, num_vec, num_roce_vec = 0;
d379142b 2402 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2403
10ef9ab4
SP
2404 /* If RSS queues are not used, need a vec for default RX Q */
2405 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2406 if (be_roce_supported(adapter)) {
2407 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2408 (num_online_cpus() + 1));
2409 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2410 num_vec += num_roce_vec;
2411 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2412 }
10ef9ab4 2413 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2414
ac6a0c4a 2415 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2416 adapter->msix_entries[i].entry = i;
2417
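	/* pci_enable_msix() returns 0 on success or, if the full request
	 * cannot be granted, a positive count of the vectors actually
	 * available; retry with that smaller count before giving up.
	 */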
ac6a0c4a 2418 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2419 if (status == 0) {
2420 goto done;
2421 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2422 num_vec = status;
c2bba3df
SK
2423 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2424 num_vec);
2425 if (!status)
3abcdeda 2426 goto done;
3abcdeda 2427 }
d379142b
SP
2428
2429 dev_warn(dev, "MSIx enable failed\n");
c2bba3df
SK
2430 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2431 if (!be_physfn(adapter))
2432 return status;
2433 return 0;
3abcdeda 2434done:
045508a8
PP
2435 if (be_roce_supported(adapter)) {
2436 if (num_vec > num_roce_vec) {
2437 adapter->num_msix_vec = num_vec - num_roce_vec;
2438 adapter->num_msix_roce_vec =
2439 num_vec - adapter->num_msix_vec;
2440 } else {
2441 adapter->num_msix_vec = num_vec;
2442 adapter->num_msix_roce_vec = 0;
2443 }
2444 } else
2445 adapter->num_msix_vec = num_vec;
d379142b 2446 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
c2bba3df 2447 return 0;
6b7c5b94
SP
2448}
2449
fe6d2a38 2450static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2451 struct be_eq_obj *eqo)
b628bde2 2452{
10ef9ab4 2453 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2454}
6b7c5b94 2455
b628bde2
SP
2456static int be_msix_register(struct be_adapter *adapter)
2457{
10ef9ab4
SP
2458 struct net_device *netdev = adapter->netdev;
2459 struct be_eq_obj *eqo;
2460 int status, i, vec;
6b7c5b94 2461
10ef9ab4
SP
2462 for_all_evt_queues(adapter, eqo, i) {
2463 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2464 vec = be_msix_vec_get(adapter, eqo);
2465 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2466 if (status)
2467 goto err_msix;
2468 }
b628bde2 2469
6b7c5b94 2470 return 0;
3abcdeda 2471err_msix:
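	/* Unwind in reverse: free only the vectors that were successfully
	 * requested before the failure.
	 */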
10ef9ab4
SP
2472 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2473 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2474 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2475 status);
ac6a0c4a 2476 be_msix_disable(adapter);
6b7c5b94
SP
2477 return status;
2478}
2479
2480static int be_irq_register(struct be_adapter *adapter)
2481{
2482 struct net_device *netdev = adapter->netdev;
2483 int status;
2484
ac6a0c4a 2485 if (msix_enabled(adapter)) {
6b7c5b94
SP
2486 status = be_msix_register(adapter);
2487 if (status == 0)
2488 goto done;
ba343c77
SB
2489 /* INTx is not supported for VF */
2490 if (!be_physfn(adapter))
2491 return status;
6b7c5b94
SP
2492 }
2493
e49cc34f 2494 /* INTx: only the first EQ is used */
6b7c5b94
SP
2495 netdev->irq = adapter->pdev->irq;
2496 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2497 &adapter->eq_obj[0]);
6b7c5b94
SP
2498 if (status) {
2499 dev_err(&adapter->pdev->dev,
2500 "INTx request IRQ failed - err %d\n", status);
2501 return status;
2502 }
2503done:
2504 adapter->isr_registered = true;
2505 return 0;
2506}
2507
2508static void be_irq_unregister(struct be_adapter *adapter)
2509{
2510 struct net_device *netdev = adapter->netdev;
10ef9ab4 2511 struct be_eq_obj *eqo;
3abcdeda 2512 int i;
6b7c5b94
SP
2513
2514 if (!adapter->isr_registered)
2515 return;
2516
2517 /* INTx */
ac6a0c4a 2518 if (!msix_enabled(adapter)) {
e49cc34f 2519 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2520 goto done;
2521 }
2522
2523 /* MSIx */
10ef9ab4
SP
2524 for_all_evt_queues(adapter, eqo, i)
2525 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2526
6b7c5b94
SP
2527done:
2528 adapter->isr_registered = false;
6b7c5b94
SP
2529}
2530
10ef9ab4 2531static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2532{
2533 struct be_queue_info *q;
2534 struct be_rx_obj *rxo;
2535 int i;
2536
2537 for_all_rx_queues(adapter, rxo, i) {
2538 q = &rxo->q;
2539 if (q->created) {
2540 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2541 be_rx_cq_clean(rxo);
482c9e79 2542 }
10ef9ab4 2543 be_queue_free(adapter, q);
482c9e79
SP
2544 }
2545}
2546
889cd4b2
SP
2547static int be_close(struct net_device *netdev)
2548{
2549 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2550 struct be_eq_obj *eqo;
2551 int i;
889cd4b2 2552
045508a8
PP
2553 be_roce_dev_close(adapter);
2554
04d3d624
SK
2555 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2556 for_all_evt_queues(adapter, eqo, i)
2557 napi_disable(&eqo->napi);
2558 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2559 }
a323d9bf
SP
2560
2561 be_async_mcc_disable(adapter);
2562
2563 /* Wait for all pending tx completions to arrive so that
2564 * all tx skbs are freed.
2565 */
2566 be_tx_compl_clean(adapter);
fba87559 2567 netif_tx_disable(netdev);
a323d9bf
SP
2568
2569 be_rx_qs_destroy(adapter);
2570
2571 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2572 if (msix_enabled(adapter))
2573 synchronize_irq(be_msix_vec_get(adapter, eqo));
2574 else
2575 synchronize_irq(netdev->irq);
2576 be_eq_clean(eqo);
63fcb27f
PR
2577 }
2578
889cd4b2
SP
2579 be_irq_unregister(adapter);
2580
482c9e79
SP
2581 return 0;
2582}
2583
10ef9ab4 2584static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2585{
2586 struct be_rx_obj *rxo;
e9008ee9
PR
2587 int rc, i, j;
2588 u8 rsstable[128];
482c9e79
SP
2589
2590 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2591 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2592 sizeof(struct be_eth_rx_d));
2593 if (rc)
2594 return rc;
2595 }
2596
2597 /* The FW would like the default RXQ to be created first */
2598 rxo = default_rxo(adapter);
2599 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2600 adapter->if_handle, false, &rxo->rss_id);
2601 if (rc)
2602 return rc;
2603
2604 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2605 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2606 rx_frag_size, adapter->if_handle,
2607 true, &rxo->rss_id);
482c9e79
SP
2608 if (rc)
2609 return rc;
2610 }
2611
2612 if (be_multi_rxq(adapter)) {
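		/* Stripe the 128-entry RSS indirection table across the RSS
		 * rings: e.g., with 4 rings whose rss_ids are a,b,c,d the
		 * table reads a,b,c,d,a,b,c,d,... so hashed flows spread
		 * evenly over the rings.
		 */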
e9008ee9
PR
2613 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2614 for_all_rss_queues(adapter, rxo, i) {
2615 if ((j + i) >= 128)
2616 break;
2617 rsstable[j + i] = rxo->rss_id;
2618 }
2619 }
594ad54a
SR
2620 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2621 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2622
2623 if (!BEx_chip(adapter))
2624 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2625 RSS_ENABLE_UDP_IPV6;
2626
2627 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2628 128);
2629 if (rc) {
2630 adapter->rss_flags = 0;
482c9e79 2631 return rc;
594ad54a 2632 }
482c9e79
SP
2633 }
2634
2635 /* First time posting */
10ef9ab4 2636 for_all_rx_queues(adapter, rxo, i)
482c9e79 2637 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2638 return 0;
2639}
2640
6b7c5b94
SP
2641static int be_open(struct net_device *netdev)
2642{
2643 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2644 struct be_eq_obj *eqo;
3abcdeda 2645 struct be_rx_obj *rxo;
10ef9ab4 2646 struct be_tx_obj *txo;
b236916a 2647 u8 link_status;
3abcdeda 2648 int status, i;
5fb379ee 2649
10ef9ab4 2650 status = be_rx_qs_create(adapter);
482c9e79
SP
2651 if (status)
2652 goto err;
2653
c2bba3df
SK
2654 status = be_irq_register(adapter);
2655 if (status)
2656 goto err;
5fb379ee 2657
10ef9ab4 2658 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2659 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2660
10ef9ab4
SP
2661 for_all_tx_queues(adapter, txo, i)
2662 be_cq_notify(adapter, txo->cq.id, true, 0);
2663
7a1e9b20
SP
2664 be_async_mcc_enable(adapter);
2665
10ef9ab4
SP
2666 for_all_evt_queues(adapter, eqo, i) {
2667 napi_enable(&eqo->napi);
2668 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2669 }
04d3d624 2670 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2671
323ff71e 2672 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2673 if (!status)
2674 be_link_status_update(adapter, link_status);
2675
fba87559 2676 netif_tx_start_all_queues(netdev);
045508a8 2677 be_roce_dev_open(adapter);
889cd4b2
SP
2678 return 0;
2679err:
2680 be_close(adapter->netdev);
2681 return -EIO;
5fb379ee
SP
2682}
2683
71d8d1b5
AK
2684static int be_setup_wol(struct be_adapter *adapter, bool enable)
2685{
2686 struct be_dma_mem cmd;
2687 int status = 0;
2688 u8 mac[ETH_ALEN];
2689
2690 memset(mac, 0, ETH_ALEN);
2691
2692 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2693 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2694 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2695 if (cmd.va == NULL)
2696 return -1;
71d8d1b5
AK
2697
2698 if (enable) {
2699 status = pci_write_config_dword(adapter->pdev,
2700 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2701 if (status) {
2702 dev_err(&adapter->pdev->dev,
2381a55c 2703 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2704 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2705 cmd.dma);
71d8d1b5
AK
2706 return status;
2707 }
2708 status = be_cmd_enable_magic_wol(adapter,
2709 adapter->netdev->dev_addr, &cmd);
2710 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2711 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2712 } else {
2713 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2714 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2715 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2716 }
2717
2b7bcebf 2718 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2719 return status;
2720}
2721
6d87f5c3
AK
2722/*
2723 * Generate a seed MAC address from the PF MAC Address using jhash.
 2724 * MAC addresses for VFs are assigned incrementally starting from the seed.
2725 * These addresses are programmed in the ASIC by the PF and the VF driver
2726 * queries for the MAC address during its probe.
2727 */
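/* For example, with a hypothetical seed of 02:00:00:10:20:30, VF0 is
 * assigned ...:30, VF1 ...:31 and so on, as only mac[5] is incremented
 * per VF below.
 */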
4c876616 2728static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2729{
f9449ab7 2730 u32 vf;
3abcdeda 2731 int status = 0;
6d87f5c3 2732 u8 mac[ETH_ALEN];
11ac75ed 2733 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2734
2735 be_vf_eth_addr_generate(adapter, mac);
2736
11ac75ed 2737 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2738 if (lancer_chip(adapter)) {
2739 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2740 } else {
2741 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2742 vf_cfg->if_handle,
2743 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2744 }
2745
6d87f5c3
AK
2746 if (status)
2747 dev_err(&adapter->pdev->dev,
590c391d 2748 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2749 else
11ac75ed 2750 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2751
2752 mac[5] += 1;
2753 }
2754 return status;
2755}
2756
4c876616
SP
2757static int be_vfs_mac_query(struct be_adapter *adapter)
2758{
2759 int status, vf;
2760 u8 mac[ETH_ALEN];
2761 struct be_vf_cfg *vf_cfg;
2762 bool active;
2763
2764 for_all_vfs(adapter, vf_cfg, vf) {
2765 be_cmd_get_mac_from_list(adapter, mac, &active,
2766 &vf_cfg->pmac_id, 0);
2767
2768 status = be_cmd_mac_addr_query(adapter, mac, false,
2769 vf_cfg->if_handle, 0);
2770 if (status)
2771 return status;
2772 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2773 }
2774 return 0;
2775}
2776
f9449ab7 2777static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2778{
11ac75ed 2779 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2780 u32 vf;
2781
257a3feb 2782 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2783 dev_warn(&adapter->pdev->dev,
2784 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2785 goto done;
2786 }
2787
b4c1df93
SP
2788 pci_disable_sriov(adapter->pdev);
2789
11ac75ed 2790 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2791 if (lancer_chip(adapter))
2792 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2793 else
11ac75ed
SP
2794 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2795 vf_cfg->pmac_id, vf + 1);
f9449ab7 2796
11ac75ed
SP
2797 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2798 }
39f1d94d
SP
2799done:
2800 kfree(adapter->vf_cfg);
2801 adapter->num_vfs = 0;
6d87f5c3
AK
2802}
2803
a54769f5
SP
2804static int be_clear(struct be_adapter *adapter)
2805{
fbc13f01
AK
2806 int i = 1;
2807
191eb756
SP
2808 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2809 cancel_delayed_work_sync(&adapter->work);
2810 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2811 }
2812
11ac75ed 2813 if (sriov_enabled(adapter))
f9449ab7
SP
2814 be_vf_clear(adapter);
2815
fbc13f01
AK
2816 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2817 be_cmd_pmac_del(adapter, adapter->if_handle,
2818 adapter->pmac_id[i], 0);
2819
f9449ab7 2820 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2821
2822 be_mcc_queues_destroy(adapter);
10ef9ab4 2823 be_rx_cqs_destroy(adapter);
a54769f5 2824 be_tx_queues_destroy(adapter);
10ef9ab4 2825 be_evt_queues_destroy(adapter);
a54769f5 2826
abb93951
PR
2827 kfree(adapter->pmac_id);
2828 adapter->pmac_id = NULL;
2829
10ef9ab4 2830 be_msix_disable(adapter);
a54769f5
SP
2831 return 0;
2832}
2833
4c876616 2834static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2835{
4c876616
SP
2836 struct be_vf_cfg *vf_cfg;
2837 u32 cap_flags, en_flags, vf;
abb93951
PR
2838 int status;
2839
4c876616
SP
2840 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2841 BE_IF_FLAGS_MULTICAST;
abb93951 2842
4c876616
SP
2843 for_all_vfs(adapter, vf_cfg, vf) {
2844 if (!BE3_chip(adapter))
a05f99db
VV
2845 be_cmd_get_profile_config(adapter, &cap_flags,
2846 NULL, vf + 1);
4c876616
SP
2847
2848 /* If a FW profile exists, then cap_flags are updated */
2849 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2850 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2851 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2852 &vf_cfg->if_handle, vf + 1);
2853 if (status)
2854 goto err;
2855 }
2856err:
2857 return status;
abb93951
PR
2858}
2859
39f1d94d 2860static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2861{
11ac75ed 2862 struct be_vf_cfg *vf_cfg;
30128031
SP
2863 int vf;
2864
39f1d94d
SP
2865 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2866 GFP_KERNEL);
2867 if (!adapter->vf_cfg)
2868 return -ENOMEM;
2869
11ac75ed
SP
2870 for_all_vfs(adapter, vf_cfg, vf) {
2871 vf_cfg->if_handle = -1;
2872 vf_cfg->pmac_id = -1;
30128031 2873 }
39f1d94d 2874 return 0;
30128031
SP
2875}
2876
f9449ab7
SP
2877static int be_vf_setup(struct be_adapter *adapter)
2878{
11ac75ed 2879 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2880 u16 def_vlan, lnk_speed;
4c876616
SP
2881 int status, old_vfs, vf;
2882 struct device *dev = &adapter->pdev->dev;
39f1d94d 2883
257a3feb 2884 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
2885 if (old_vfs) {
2886 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2887 if (old_vfs != num_vfs)
2888 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2889 adapter->num_vfs = old_vfs;
39f1d94d 2890 } else {
4c876616
SP
2891 if (num_vfs > adapter->dev_num_vfs)
2892 dev_info(dev, "Device supports %d VFs and not %d\n",
2893 adapter->dev_num_vfs, num_vfs);
2894 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
b4c1df93 2895 if (!adapter->num_vfs)
4c876616 2896 return 0;
39f1d94d
SP
2897 }
2898
2899 status = be_vf_setup_init(adapter);
2900 if (status)
2901 goto err;
30128031 2902
4c876616
SP
2903 if (old_vfs) {
2904 for_all_vfs(adapter, vf_cfg, vf) {
2905 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2906 if (status)
2907 goto err;
2908 }
2909 } else {
2910 status = be_vfs_if_create(adapter);
f9449ab7
SP
2911 if (status)
2912 goto err;
f9449ab7
SP
2913 }
2914
4c876616
SP
2915 if (old_vfs) {
2916 status = be_vfs_mac_query(adapter);
2917 if (status)
2918 goto err;
2919 } else {
39f1d94d
SP
2920 status = be_vf_eth_addr_config(adapter);
2921 if (status)
2922 goto err;
2923 }
f9449ab7 2924
11ac75ed 2925 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
 2926 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
 2927 * Allow the full available bandwidth.
2928 */
2929 if (BE3_chip(adapter) && !old_vfs)
2930 be_cmd_set_qos(adapter, 1000, vf+1);
2931
2932 status = be_cmd_link_status_query(adapter, &lnk_speed,
2933 NULL, vf + 1);
2934 if (!status)
2935 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2936
2937 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2938 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2939 if (status)
2940 goto err;
2941 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2942
2943 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2944 }
b4c1df93
SP
2945
2946 if (!old_vfs) {
2947 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2948 if (status) {
2949 dev_err(dev, "SRIOV enable failed\n");
2950 adapter->num_vfs = 0;
2951 goto err;
2952 }
2953 }
f9449ab7
SP
2954 return 0;
2955err:
4c876616
SP
2956 dev_err(dev, "VF setup failed\n");
2957 be_vf_clear(adapter);
f9449ab7
SP
2958 return status;
2959}
2960
30128031
SP
2961static void be_setup_init(struct be_adapter *adapter)
2962{
2963 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2964 adapter->phy.link_speed = -1;
30128031
SP
2965 adapter->if_handle = -1;
2966 adapter->be3_native = false;
2967 adapter->promiscuous = false;
f25b119c
PR
2968 if (be_physfn(adapter))
2969 adapter->cmd_privileges = MAX_PRIVILEGES;
2970 else
2971 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2972}
2973
1578e777
PR
2974static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2975 bool *active_mac, u32 *pmac_id)
590c391d 2976{
1578e777 2977 int status = 0;
e5e1ee89 2978
1578e777
PR
2979 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2980 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2981 if (!lancer_chip(adapter) && !be_physfn(adapter))
2982 *active_mac = true;
2983 else
2984 *active_mac = false;
e5e1ee89 2985
1578e777
PR
2986 return status;
2987 }
e5e1ee89 2988
1578e777
PR
2989 if (lancer_chip(adapter)) {
2990 status = be_cmd_get_mac_from_list(adapter, mac,
2991 active_mac, pmac_id, 0);
2992 if (*active_mac) {
5ee4979b
SP
2993 status = be_cmd_mac_addr_query(adapter, mac, false,
2994 if_handle, *pmac_id);
1578e777
PR
2995 }
2996 } else if (be_physfn(adapter)) {
2997 /* For BE3, for PF get permanent MAC */
5ee4979b 2998 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2999 *active_mac = false;
e5e1ee89 3000 } else {
1578e777 3001 /* For BE3, for VF get soft MAC assigned by PF */
5ee4979b 3002 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
3003 if_handle, 0);
3004 *active_mac = true;
e5e1ee89 3005 }
590c391d
PR
3006 return status;
3007}
3008
abb93951
PR
3009static void be_get_resources(struct be_adapter *adapter)
3010{
4c876616
SP
3011 u16 dev_num_vfs;
3012 int pos, status;
abb93951 3013 bool profile_present = false;
a05f99db 3014 u16 txq_count = 0;
abb93951 3015
4c876616 3016 if (!BEx_chip(adapter)) {
abb93951 3017 status = be_cmd_get_func_config(adapter);
abb93951
PR
3018 if (!status)
3019 profile_present = true;
a05f99db
VV
3020 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3021 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
3022 }
3023
3024 if (profile_present) {
3025 /* Sanity fixes for Lancer */
3026 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3027 BE_UC_PMAC_COUNT);
3028 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3029 BE_NUM_VLANS_SUPPORTED);
3030 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3031 BE_MAX_MC);
3032 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3033 MAX_TX_QS);
3034 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3035 BE3_MAX_RSS_QS);
3036 adapter->max_event_queues = min_t(u16,
3037 adapter->max_event_queues,
3038 BE3_MAX_RSS_QS);
3039
3040 if (adapter->max_rss_queues &&
3041 adapter->max_rss_queues == adapter->max_rx_queues)
3042 adapter->max_rss_queues -= 1;
3043
3044 if (adapter->max_event_queues < adapter->max_rss_queues)
3045 adapter->max_rss_queues = adapter->max_event_queues;
3046
3047 } else {
3048 if (be_physfn(adapter))
3049 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3050 else
3051 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3052
3053 if (adapter->function_mode & FLEX10_MODE)
3054 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3055 else
3056 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3057
3058 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
3059 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3060 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3061 MAX_TX_QS);
abb93951
PR
3062 adapter->max_rss_queues = (adapter->be3_native) ?
3063 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3064 adapter->max_event_queues = BE3_MAX_RSS_QS;
3065
3066 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3067 BE_IF_FLAGS_BROADCAST |
3068 BE_IF_FLAGS_MULTICAST |
3069 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3070 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3071 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3072 BE_IF_FLAGS_PROMISCUOUS;
3073
3074 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3075 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3076 }
4c876616
SP
3077
3078 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3079 if (pos) {
3080 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3081 &dev_num_vfs);
3082 if (BE3_chip(adapter))
3083 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3084 adapter->dev_num_vfs = dev_num_vfs;
3085 }
abb93951
PR
3086}
3087
39f1d94d
SP
 3088 /* Routine to query per-function resource limits */
3089static int be_get_config(struct be_adapter *adapter)
3090{
4c876616 3091 int status;
39f1d94d 3092
abb93951
PR
3093 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3094 &adapter->function_mode,
0ad3157e
VV
3095 &adapter->function_caps,
3096 &adapter->asic_rev);
abb93951
PR
3097 if (status)
3098 goto err;
3099
3100 be_get_resources(adapter);
3101
3102 /* primary mac needs 1 pmac entry */
3103 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3104 sizeof(u32), GFP_KERNEL);
3105 if (!adapter->pmac_id) {
3106 status = -ENOMEM;
3107 goto err;
3108 }
3109
abb93951
PR
3110err:
3111 return status;
39f1d94d
SP
3112}
3113
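/* Bring-up order below matters: EQs first, then TX CQs, RX CQs and the MCC
 * queues (the MCC CQ rides on a previously created EQ), then the interface
 * and its MAC, and the TX rings last.
 */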
5fb379ee
SP
3114static int be_setup(struct be_adapter *adapter)
3115{
39f1d94d 3116 struct device *dev = &adapter->pdev->dev;
abb93951 3117 u32 en_flags;
a54769f5 3118 u32 tx_fc, rx_fc;
10ef9ab4 3119 int status;
ba343c77 3120 u8 mac[ETH_ALEN];
1578e777 3121 bool active_mac;
ba343c77 3122
30128031 3123 be_setup_init(adapter);
6b7c5b94 3124
abb93951
PR
3125 if (!lancer_chip(adapter))
3126 be_cmd_req_native_mode(adapter);
39f1d94d 3127
abb93951
PR
3128 status = be_get_config(adapter);
3129 if (status)
3130 goto err;
73d540f2 3131
c2bba3df
SK
3132 status = be_msix_enable(adapter);
3133 if (status)
3134 goto err;
10ef9ab4
SP
3135
3136 status = be_evt_queues_create(adapter);
3137 if (status)
a54769f5 3138 goto err;
6b7c5b94 3139
10ef9ab4
SP
3140 status = be_tx_cqs_create(adapter);
3141 if (status)
3142 goto err;
3143
3144 status = be_rx_cqs_create(adapter);
3145 if (status)
a54769f5 3146 goto err;
6b7c5b94 3147
f9449ab7 3148 status = be_mcc_queues_create(adapter);
10ef9ab4 3149 if (status)
a54769f5 3150 goto err;
6b7c5b94 3151
f25b119c
PR
3152 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
 3153 /* In UMC mode FW does not return the right privileges.
 3154 * Override with the correct privilege equivalent to PF.
3155 */
3156 if (be_is_mc(adapter))
3157 adapter->cmd_privileges = MAX_PRIVILEGES;
3158
f9449ab7
SP
3159 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3160 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3161
abb93951 3162 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3163 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3164
abb93951 3165 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3166
abb93951 3167 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3168 &adapter->if_handle, 0);
5fb379ee 3169 if (status != 0)
a54769f5 3170 goto err;
6b7c5b94 3171
1578e777
PR
3172 memset(mac, 0, ETH_ALEN);
3173 active_mac = false;
3174 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3175 &active_mac, &adapter->pmac_id[0]);
3176 if (status != 0)
3177 goto err;
3178
3179 if (!active_mac) {
3180 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3181 &adapter->pmac_id[0], 0);
3182 if (status != 0)
3183 goto err;
3184 }
3185
3186 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3187 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3188 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3189 }
0dffc83e 3190
10ef9ab4
SP
3191 status = be_tx_qs_create(adapter);
3192 if (status)
3193 goto err;
3194
eeb65ced 3195 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3196
1d1e9a46 3197 if (adapter->vlans_added)
10329df8 3198 be_vid_config(adapter);
7ab8b0b4 3199
a54769f5 3200 be_set_rx_mode(adapter->netdev);
5fb379ee 3201
ddc3f5cb 3202 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3203
ddc3f5cb
AK
3204 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3205 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3206 adapter->rx_fc);
2dc1deb6 3207
b4c1df93 3208 if (be_physfn(adapter)) {
39f1d94d
SP
3209 if (adapter->dev_num_vfs)
3210 be_vf_setup(adapter);
3211 else
3212 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3213 }
3214
f25b119c
PR
3215 status = be_cmd_get_phy_info(adapter);
3216 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3217 adapter->phy.fc_autoneg = 1;
3218
191eb756
SP
3219 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3220 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3221 return 0;
a54769f5
SP
3222err:
3223 be_clear(adapter);
3224 return status;
3225}
6b7c5b94 3226
66268739
IV
3227#ifdef CONFIG_NET_POLL_CONTROLLER
3228static void be_netpoll(struct net_device *netdev)
3229{
3230 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3231 struct be_eq_obj *eqo;
66268739
IV
3232 int i;
3233
e49cc34f
SP
3234 for_all_evt_queues(adapter, eqo, i) {
3235 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3236 napi_schedule(&eqo->napi);
3237 }
10ef9ab4
SP
3238
3239 return;
66268739
IV
3240}
3241#endif
3242
84517482 3243#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3244char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3245
fa9a6fed 3246static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3247 const u8 *p, u32 img_start, int image_size,
3248 int hdr_size)
fa9a6fed
SB
3249{
3250 u32 crc_offset;
3251 u8 flashed_crc[4];
3252 int status;
3f0d4560
AK
3253
3254 crc_offset = hdr_size + img_start + image_size - 4;
3255
fa9a6fed 3256 p += crc_offset;
3f0d4560
AK
3257
3258 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3259 (image_size - 4));
fa9a6fed
SB
3260 if (status) {
3261 dev_err(&adapter->pdev->dev,
3262 "could not get crc from flash, not flashing redboot\n");
3263 return false;
3264 }
3265
 3266 /* update redboot only if crc does not match */
3267 if (!memcmp(flashed_crc, p, 4))
3268 return false;
3269 else
3270 return true;
fa9a6fed
SB
3271}
3272
306f1348
SP
3273static bool phy_flashing_required(struct be_adapter *adapter)
3274{
42f11cf2
AK
3275 return (adapter->phy.phy_type == TN_8022 &&
3276 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3277}
3278
c165541e
PR
3279static bool is_comp_in_ufi(struct be_adapter *adapter,
3280 struct flash_section_info *fsec, int type)
3281{
3282 int i = 0, img_type = 0;
3283 struct flash_section_info_g2 *fsec_g2 = NULL;
3284
ca34fe38 3285 if (BE2_chip(adapter))
c165541e
PR
3286 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3287
3288 for (i = 0; i < MAX_FLASH_COMP; i++) {
3289 if (fsec_g2)
3290 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3291 else
3292 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3293
3294 if (img_type == type)
3295 return true;
3296 }
3297 return false;
3298
3299}
3300
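/* Scan the UFI past the file header in 32-byte steps until the flash
 * directory cookie is found; returns NULL if the cookie is absent.
 */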
3301struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3302 int header_size,
3303 const struct firmware *fw)
3304{
3305 struct flash_section_info *fsec = NULL;
3306 const u8 *p = fw->data;
3307
3308 p += header_size;
3309 while (p < (fw->data + fw->size)) {
3310 fsec = (struct flash_section_info *)p;
3311 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3312 return fsec;
3313 p += 32;
3314 }
3315 return NULL;
3316}
3317
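/* Firmware images are written in chunks of up to 32KB: intermediate chunks
 * use a SAVE op while the final chunk uses a FLASH op, which commits the
 * accumulated data to the flash ROM.
 */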
773a2d7c
PR
3318static int be_flash(struct be_adapter *adapter, const u8 *img,
3319 struct be_dma_mem *flash_cmd, int optype, int img_size)
3320{
3321 u32 total_bytes = 0, flash_op, num_bytes = 0;
3322 int status = 0;
3323 struct be_cmd_write_flashrom *req = flash_cmd->va;
3324
3325 total_bytes = img_size;
3326 while (total_bytes) {
3327 num_bytes = min_t(u32, 32*1024, total_bytes);
3328
3329 total_bytes -= num_bytes;
3330
3331 if (!total_bytes) {
3332 if (optype == OPTYPE_PHY_FW)
3333 flash_op = FLASHROM_OPER_PHY_FLASH;
3334 else
3335 flash_op = FLASHROM_OPER_FLASH;
3336 } else {
3337 if (optype == OPTYPE_PHY_FW)
3338 flash_op = FLASHROM_OPER_PHY_SAVE;
3339 else
3340 flash_op = FLASHROM_OPER_SAVE;
3341 }
3342
be716446 3343 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3344 img += num_bytes;
3345 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3346 flash_op, num_bytes);
3347 if (status) {
3348 if (status == ILLEGAL_IOCTL_REQ &&
3349 optype == OPTYPE_PHY_FW)
3350 break;
3351 dev_err(&adapter->pdev->dev,
3352 "cmd to write to flash rom failed.\n");
3353 return status;
3354 }
3355 }
3356 return 0;
3357}
3358
/* Flash firmware components on BE2, BE3 and BE3-R adapters. Each entry
 * in the per-generation flash component table gives an image's offset,
 * op-type and maximum size; components absent from the UFI are skipped.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

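/* Flash firmware components on Skyhawk adapters by walking the UFI's
 * section entries and mapping each known image type to its flash
 * op-type; entries with unknown image types are skipped.
 */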
static int be_flash_skyhawk(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

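/* Download firmware to a Lancer adapter: stream the image to the "/prg"
 * flash object in 32KB write-object chunks, then commit it with a
 * zero-length write. Depending on change_status, either trigger an
 * in-band FW reset or report that a reboot is needed.
 */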
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

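/* Classify the UFI image from its file header and verify that it
 * matches the adapter generation: build '4' for Skyhawk, '3' for BE3
 * (asic_type_rev 0x10 denotes a BE3-R image) and '2' for BE2.
 */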
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

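/* Non-Lancer download path: validate the UFI type against the chip and
 * flash each image header with imageid 1 via the BEx or Skyhawk
 * routine. A plain BE3 UFI is refused on BE3-R (asic_rev >= 0x10).
 */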
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

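/* Entry point for user-initiated flashing: fetch the image with
 * request_firmware() and dispatch to the Lancer or BEx/Skyhawk download
 * path; refresh the cached FW version strings on success.
 */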
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

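/* Set up netdev features (checksum offloads, TSO, VLAN offloads and RX
 * hashing on multi-queue setups), the netdev/ethtool ops and one NAPI
 * context per event queue.
 */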
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

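/* On Skyhawk, record the location and size of the doorbell BAR so the
 * RoCE driver can map its portion of it.
 */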
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

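/* Map the PCI BARs: the CSR BAR (bar 2) only on BEx physical functions,
 * and the doorbell BAR selected by db_bar() on all functions.
 */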
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

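/* One-time control path setup: decode the SLI interface register, map
 * the PCI BARs, allocate the 16-byte-aligned mailbox and the RX filter
 * DMA buffers, and initialize the mailbox/MCC locks.
 */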
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

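/* Allocate the stats DMA buffer; its size depends on the stats request
 * version the chip uses (Lancer pport stats, v0 on BE2, v1 on BE3 and
 * Skyhawk).
 */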
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

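/* Device teardown: detach the RoCE device, mask interrupts, stop the
 * recovery worker and release resources in the reverse order of
 * be_probe().
 */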
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return ((adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter)) ? true : false;
}

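/* Read the UART trace level for module 0 from the FW's extended FAT
 * capabilities; returns 0 on Lancer and on any failure.
 */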
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

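/* Fetch one-time configuration from the FW: controller attributes, WoL
 * capability (falling back to the exclusion list), the die-temperature
 * polling interval and the FW log level that seeds msg_enable.
 */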
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

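/* Recover a Lancer function after a FW error: wait for the FW ready
 * state, tear the function down, clear the error flags and set it up
 * again, reopening the interface if it was running.
 */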
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}

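/* Recovery worker, run every second: detect FW errors and, on Lancer,
 * detach the netdev and attempt lancer_recover_func().
 */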
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

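/* Housekeeping worker, run every second: reap MCC completions while the
 * interface is down, issue stats and die-temperature queries, replenish
 * starved RX queues and update adaptive EQ delays.
 */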
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

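/* PCI probe: enable the device, pick a 64-bit DMA mask with a 32-bit
 * fallback, initialize the control path, sync with the FW ready state,
 * FLR the function if no VFs are enabled, then set up the adapter and
 * register the netdev and RoCE device.
 */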
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

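/* PM suspend: arm WoL if enabled, stop the recovery worker, close the
 * interface and tear the adapter down before powering the device off.
 */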
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

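/* PM resume: restore PCI state, re-init the FW command path, rebuild
 * the adapter, reopen the interface, restart the recovery worker and
 * disarm WoL.
 */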
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

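/* EEH: a PCI channel error was detected. On the first report, detach
 * the netdev and tear the adapter down; then wait out a possible FW
 * flash debug dump before requesting a slot reset.
 */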
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

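/* EEH slot reset: re-enable and restore the device, then wait for the
 * FW to become ready before declaring the slot recovered.
 */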
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

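/* EEH resume: reset and re-init the function, rebuild the adapter,
 * reopen the interface and restart the recovery worker.
 */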
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

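/* Module init: validate the rx_frag_size parameter (2048, 4096 or 8192,
 * falling back to 2048) and register the PCI driver.
 */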
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);