be2net: remove LANCER A0 workaround
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
6b7c5b94
SP
24
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
ba343c77 31static unsigned int num_vfs;
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
ba343c77 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 34
11ac75ed
SP
35static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
6b7c5b94 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
48 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Index in this table == bit position in the UE status low register;
 * each entry names the HW block that raised the unrecoverable error.
 * (Some names carry trailing spaces as emitted by the original firmware
 * documentation — keep them byte-for-byte.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Index in this table == bit position in the UE status high register;
 * trailing "Unknown" entries cover reserved bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 121
752961a1
SP
122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
6b7c5b94
SP
129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 132 if (mem->va) {
2b7bcebf
IV
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
1cfafab9
SP
135 mem->va = NULL;
136 }
6b7c5b94
SP
137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
2b7bcebf
IV
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
6b7c5b94 150 if (!mem->va)
10ef9ab4 151 return -ENOMEM;
6b7c5b94
SP
152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
/* Enable or disable host interrupt delivery by read-modify-writing the
 * HOSTINTR bit in the MEMBAR interrupt-control register in PCI config
 * space. The write is skipped when the bit is already in the requested
 * state, and the device is not touched at all after an EEH error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* Don't touch config space once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
177
8788fdc2 178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
183
184 wmb();
8788fdc2 185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
186}
187
8788fdc2 188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
193
194 wmb();
8788fdc2 195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
196}
197
8788fdc2 198static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
199 bool arm, bool clear_int, u16 num_popped)
200{
201 u32 val = 0;
202 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
203 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 205
f67ef7ba 206 if (adapter->eeh_error)
cf588477
SP
207 return;
208
6b7c5b94
SP
209 if (arm)
210 val |= 1 << DB_EQ_REARM_SHIFT;
211 if (clear_int)
212 val |= 1 << DB_EQ_CLR_SHIFT;
213 val |= 1 << DB_EQ_EVNT_SHIFT;
214 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 215 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
216}
217
8788fdc2 218void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
219{
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 224
f67ef7ba 225 if (adapter->eeh_error)
cf588477
SP
226 return;
227
6b7c5b94
SP
228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
232}
233
/* ndo_set_mac_address handler.
 * Programs the requested MAC as a new pmac on the interface and, if an old
 * MAC was active, deletes it afterwards. BE (non-Lancer) VFs cannot change
 * their MAC — the PF owns it — so they may only "set" the MAC that is
 * already active. Returns 0 on success or a negative errno / FW status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* Requested MAC is already the netdev's MAC: nothing to program */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Add the new MAC first so traffic is not interrupted */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* Remove the previously active MAC entry, if there was one */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
287
/* Copy the v0 (BE2) HW statistics block, as returned by FW, into the
 * adapter's chip-independent driver stats (adapter->drv_stats).
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats block in LE; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 counts address and vlan mismatch drops separately; fold both */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are kept per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
336
/* Copy the v1 (BE3) HW statistics block, as returned by FW, into the
 * adapter's chip-independent driver stats (adapter->drv_stats).
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats block in LE; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* counters present only in the v1 layout */
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* unlike v0, jabber events are already per-port here */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
381
/* Copy Lancer per-physical-port (pport) statistics, as returned by FW,
 * into the adapter's chip-independent driver stats. Lancer keeps many
 * counters as 64-bit hi/lo pairs; only the low words are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns the stats block in LE; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 420
09c1c68f
SP
421static void accumulate_16bit_val(u32 *acc, u16 val)
422{
423#define lo(x) (x & 0xFFFF)
424#define hi(x) (x & 0xFFFF0000)
425 bool wrapped = val < lo(*acc);
426 u32 newacc = hi(*acc) + val;
427
428 if (wrapped)
429 newacc += 65536;
430 ACCESS_ONCE(*acc) = newacc;
431}
432
/* Dispatch the FW stats response to the right per-chip parser, then fold
 * the wrapping 16-bit per-RXQ "no fragments" drop counters into their
 * 32-bit SW accumulators (BE2/BE3 only; Lancer has no erx block here).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
462
/* ndo_get_stats64 handler.
 * Sums per-queue packet/byte counters (read consistently via the
 * u64_stats seqcount retry loops) and derives the aggregate error
 * counters from the FW-populated drv_stats. Returns 'stats'.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until we read a consistent pkts/bytes snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
528
b236916a 529void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 530{
6b7c5b94
SP
531 struct net_device *netdev = adapter->netdev;
532
b236916a 533 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 534 netif_carrier_off(netdev);
b236916a 535 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 536 }
b236916a
AK
537
538 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
539 netif_carrier_on(netdev);
540 else
541 netif_carrier_off(netdev);
6b7c5b94
SP
542}
543
3c8def97 544static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 545 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 546{
3c8def97
SP
547 struct be_tx_stats *stats = tx_stats(txo);
548
ab1594e9 549 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
550 stats->tx_reqs++;
551 stats->tx_wrbs += wrb_cnt;
552 stats->tx_bytes += copied;
553 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 554 if (stopped)
ac124ff9 555 stats->tx_stops++;
ab1594e9 556 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
557}
558
559/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
560static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
561 bool *dummy)
6b7c5b94 562{
ebc8d2ab
DM
563 int cnt = (skb->len > skb->data_len);
564
565 cnt += skb_shinfo(skb)->nr_frags;
566
6b7c5b94
SP
567 /* to account for hdr wrb */
568 cnt++;
fe6d2a38
SP
569 if (lancer_chip(adapter) || !(cnt & 1)) {
570 *dummy = false;
571 } else {
6b7c5b94
SP
572 /* add a dummy to make it an even num */
573 cnt++;
574 *dummy = true;
fe6d2a38 575 }
6b7c5b94
SP
576 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
577 return cnt;
578}
579
580static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
581{
582 wrb->frag_pa_hi = upper_32_bits(addr);
583 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
584 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 585 wrb->rsvd0 = 0;
6b7c5b94
SP
586}
587
1ded132d
AK
588static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
589 struct sk_buff *skb)
590{
591 u8 vlan_prio;
592 u16 vlan_tag;
593
594 vlan_tag = vlan_tx_tag_get(skb);
595 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
596 /* If vlan priority provided by OS is NOT in available bmap */
597 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
598 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
599 adapter->recommended_prio;
600
601 return vlan_tag;
602}
603
93040ae5
SK
604static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
605{
606 return vlan_tx_tag_present(skb) || adapter->pvid;
607}
608
/* Fill the header WRB that precedes an skb's fragment WRBs: requests CRC
 * offload, LSO or TCP/UDP checksum offload as appropriate, VLAN insertion,
 * and records the total WRB count and payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is set for IPv6 TSO, but not on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	/* ask HW to insert the (possibly priority-adjusted) VLAN tag */
	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
642
2b7bcebf 643static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
644 bool unmap_single)
645{
646 dma_addr_t dma;
647
648 be_dws_le_to_cpu(wrb, sizeof(*wrb));
649
650 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 651 if (wrb->frag_len) {
7101e111 652 if (unmap_single)
2b7bcebf
IV
653 dma_unmap_single(dev, dma, wrb->frag_len,
654 DMA_TO_DEVICE);
7101e111 655 else
2b7bcebf 656 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
657 }
658}
6b7c5b94 659
/* DMA-map the skb's linear data and page frags and post one WRB per
 * mapping (plus an optional dummy pad WRB) behind a header WRB on 'txq'.
 * Returns the number of payload bytes posted, or 0 on a DMA mapping
 * failure (in which case everything mapped so far is unwound and the
 * queue head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot; it is filled in last, once the
	 * total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	/* linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB to keep the WRB count even (BEx requirement) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every WRB posted so far, starting at the rollback
	 * point; only the first one can be a dma_map_single() mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
725
93040ae5
SK
726static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
727 struct sk_buff *skb)
728{
729 u16 vlan_tag = 0;
730
731 skb = skb_share_check(skb, GFP_ATOMIC);
732 if (unlikely(!skb))
733 return skb;
734
735 if (vlan_tx_tag_present(skb)) {
736 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
737 __vlan_put_tag(skb, vlan_tag);
738 skb->vlan_tci = 0;
739 }
740
741 return skb;
742}
743
/* ndo_start_xmit handler: applies two HW-bug workarounds (short-IPv4-pkt
 * padding trim and SW VLAN insertion), maps the skb into WRBs on the
 * per-queue TX ring, and rings the TX doorbell. Always returns
 * NETDEV_TX_OK; undeliverable skbs are freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore the ring head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
809
810static int be_change_mtu(struct net_device *netdev, int new_mtu)
811{
812 struct be_adapter *adapter = netdev_priv(netdev);
813 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
814 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
815 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
816 dev_info(&adapter->pdev->dev,
817 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
818 BE_MIN_MTU,
819 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
820 return -EINVAL;
821 }
822 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
823 netdev->mtu, new_mtu);
824 netdev->mtu = new_mtu;
825 return 0;
826}
827
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than HW filters: fall back to vlan promiscuous */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL/0 vid list with the promisc flag set */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
867
8e586137 868static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
6b7c5b94
SP
869{
870 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 871 int status = 0;
6b7c5b94 872
a85e9986 873 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
874 status = -EINVAL;
875 goto ret;
876 }
ba343c77 877
a85e9986
PR
878 /* Packets with VID 0 are always received by Lancer by default */
879 if (lancer_chip(adapter) && vid == 0)
880 goto ret;
881
6b7c5b94 882 adapter->vlan_tag[vid] = 1;
82903e4b 883 if (adapter->vlans_added <= (adapter->max_vlans + 1))
10329df8 884 status = be_vid_config(adapter);
8e586137 885
80817cbf
AK
886 if (!status)
887 adapter->vlans_added++;
888 else
889 adapter->vlan_tag[vid] = 0;
890ret:
891 return status;
6b7c5b94
SP
892}
893
8e586137 894static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
6b7c5b94
SP
895{
896 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 897 int status = 0;
6b7c5b94 898
a85e9986 899 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
900 status = -EINVAL;
901 goto ret;
902 }
ba343c77 903
a85e9986
PR
904 /* Packets with VID 0 are always received by Lancer by default */
905 if (lancer_chip(adapter) && vid == 0)
906 goto ret;
907
6b7c5b94 908 adapter->vlan_tag[vid] = 0;
82903e4b 909 if (adapter->vlans_added <= adapter->max_vlans)
10329df8 910 status = be_vid_config(adapter);
8e586137 911
80817cbf
AK
912 if (!status)
913 adapter->vlans_added--;
914 else
915 adapter->vlan_tag[vid] = 1;
916ret:
917 return status;
6b7c5b94
SP
918}
919
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * filtering into the NIC.  The order of firmware commands matters:
 * promisc transitions are handled first, then multicast, then the
 * unicast MAC list, falling back to (m)cast-promisc when HW filter
 * resources are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* promisc mode bypassed VLAN filtering; re-program it now */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list only when it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-added secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more unicast MACs than HW slots: fall back to promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
981
/* ndo_set_vf_mac handler: replace a VF's MAC address.
 * Lancer uses the mac-list firmware interface; BEx deletes the old
 * pmac entry and adds the new one.  On success the new MAC is cached
 * in the per-VF config.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* delete the currently-active MAC (if any) before
		 * installing the new one via the mac-list interface */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * subsequent pmac_add, so a failed delete is silently
		 * ignored — confirm this best-effort behavior is intended. */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1021
64600ea5
AK
1022static int be_get_vf_config(struct net_device *netdev, int vf,
1023 struct ifla_vf_info *vi)
1024{
1025 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1026 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1027
11ac75ed 1028 if (!sriov_enabled(adapter))
64600ea5
AK
1029 return -EPERM;
1030
11ac75ed 1031 if (vf >= adapter->num_vfs)
64600ea5
AK
1032 return -EINVAL;
1033
1034 vi->vf = vf;
11ac75ed
SP
1035 vi->tx_rate = vf_cfg->tx_rate;
1036 vi->vlan = vf_cfg->vlan_tag;
64600ea5 1037 vi->qos = 0;
11ac75ed 1038 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1039
1040 return 0;
1041}
1042
1da87b7f
AK
1043static int be_set_vf_vlan(struct net_device *netdev,
1044 int vf, u16 vlan, u8 qos)
1045{
1046 struct be_adapter *adapter = netdev_priv(netdev);
1047 int status = 0;
1048
11ac75ed 1049 if (!sriov_enabled(adapter))
1da87b7f
AK
1050 return -EPERM;
1051
11ac75ed 1052 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
1053 return -EINVAL;
1054
1055 if (vlan) {
f1f3ee1b
AK
1056 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1057 /* If this is new value, program it. Else skip. */
1058 adapter->vf_cfg[vf].vlan_tag = vlan;
1059
1060 status = be_cmd_set_hsw_config(adapter, vlan,
1061 vf + 1, adapter->vf_cfg[vf].if_handle);
1062 }
1da87b7f 1063 } else {
f1f3ee1b 1064 /* Reset Transparent Vlan Tagging. */
11ac75ed 1065 adapter->vf_cfg[vf].vlan_tag = 0;
f1f3ee1b
AK
1066 vlan = adapter->vf_cfg[vf].def_vid;
1067 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1068 adapter->vf_cfg[vf].if_handle);
1da87b7f
AK
1069 }
1070
1da87b7f
AK
1071
1072 if (status)
1073 dev_info(&adapter->pdev->dev,
1074 "VLAN %d config on VF %d failed\n", vlan, vf);
1075 return status;
1076}
1077
e1d18735
AK
1078static int be_set_vf_tx_rate(struct net_device *netdev,
1079 int vf, int rate)
1080{
1081 struct be_adapter *adapter = netdev_priv(netdev);
1082 int status = 0;
1083
11ac75ed 1084 if (!sriov_enabled(adapter))
e1d18735
AK
1085 return -EPERM;
1086
94f434c2 1087 if (vf >= adapter->num_vfs)
e1d18735
AK
1088 return -EINVAL;
1089
94f434c2
AK
1090 if (rate < 100 || rate > 10000) {
1091 dev_err(&adapter->pdev->dev,
1092 "tx rate must be between 100 and 10000 Mbps\n");
1093 return -EINVAL;
1094 }
e1d18735 1095
d5c18473
PR
1096 if (lancer_chip(adapter))
1097 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1098 else
1099 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1100
1101 if (status)
94f434c2 1102 dev_err(&adapter->pdev->dev,
e1d18735 1103 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1104 else
1105 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1106 return status;
1107}
1108
/* Count this PF's virtual functions by walking the PCI device list.
 * Returns the number of VFs that are guest-assigned when vf_state ==
 * ASSIGNED, otherwise the total number of VFs found.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;	/* no SR-IOV capability: no VFs */
	/* NOTE(review): offset/stride are read but never used below —
	 * likely leftovers from an earlier VF-matching scheme; confirm. */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the ref on the previous device and takes
	 * one on the next, so this loop needs no explicit pci_dev_put() */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1132
10ef9ab4 1133static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 1134{
10ef9ab4 1135 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
4097f663 1136 ulong now = jiffies;
ac124ff9 1137 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
1138 u64 pkts;
1139 unsigned int start, eqd;
ac124ff9 1140
10ef9ab4
SP
1141 if (!eqo->enable_aic) {
1142 eqd = eqo->eqd;
1143 goto modify_eqd;
1144 }
1145
1146 if (eqo->idx >= adapter->num_rx_qs)
ac124ff9 1147 return;
6b7c5b94 1148
10ef9ab4
SP
1149 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1150
4097f663 1151 /* Wrapped around */
3abcdeda
SP
1152 if (time_before(now, stats->rx_jiffies)) {
1153 stats->rx_jiffies = now;
4097f663
SP
1154 return;
1155 }
6b7c5b94 1156
ac124ff9
SP
1157 /* Update once a second */
1158 if (delta < HZ)
6b7c5b94
SP
1159 return;
1160
ab1594e9
SP
1161 do {
1162 start = u64_stats_fetch_begin_bh(&stats->sync);
1163 pkts = stats->rx_pkts;
1164 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1165
68c3e5a7 1166 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1167 stats->rx_pkts_prev = pkts;
3abcdeda 1168 stats->rx_jiffies = now;
10ef9ab4
SP
1169 eqd = (stats->rx_pps / 110000) << 3;
1170 eqd = min(eqd, eqo->max_eqd);
1171 eqd = max(eqd, eqo->min_eqd);
ac124ff9
SP
1172 if (eqd < 10)
1173 eqd = 0;
10ef9ab4
SP
1174
1175modify_eqd:
1176 if (eqd != eqo->cur_eqd) {
1177 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1178 eqo->cur_eqd = eqd;
ac124ff9 1179 }
6b7c5b94
SP
1180}
1181
3abcdeda 1182static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1183 struct be_rx_compl_info *rxcp)
4097f663 1184{
ac124ff9 1185 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1186
ab1594e9 1187 u64_stats_update_begin(&stats->sync);
3abcdeda 1188 stats->rx_compl++;
2e588f84 1189 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1190 stats->rx_pkts++;
2e588f84 1191 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1192 stats->rx_mcast_pkts++;
2e588f84 1193 if (rxcp->err)
ac124ff9 1194 stats->rx_compl_err++;
ab1594e9 1195 u64_stats_update_end(&stats->sync);
4097f663
SP
1196}
1197
2e588f84 1198static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1199{
19fad86f
PR
1200 /* L4 checksum is not reliable for non TCP/UDP packets.
1201 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1202 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1203 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1204}
1205
/* Claim the rx buffer at frag_idx from the posted-buffer table.
 * If this fragment is the last user of its backing page, the page's DMA
 * mapping is torn down here (the whole big page was mapped once when it
 * was posted).  Decrements the queue's in-use counter; the caller owns
 * the returned page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* last fragment of the big page: unmap it from the device */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1226
1227/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1228static void be_rx_compl_discard(struct be_rx_obj *rxo,
1229 struct be_rx_compl_info *rxcp)
6b7c5b94 1230{
3abcdeda 1231 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1232 struct be_rx_page_info *page_info;
2e588f84 1233 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1234
e80d9da6 1235 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1236 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1237 put_page(page_info->page);
1238 memset(page_info, 0, sizeof(*page_info));
2e588f84 1239 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1240 }
1241}
1242
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Tiny packets are copied into the linear area;
 * larger ones keep the header linear and attach the payload pages as
 * skb fragments, coalescing consecutive frags that share a physical
 * page into a single slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* keep only the ethernet header linear; payload stays
		 * in the page and becomes frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1319
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received fragments, set checksum/rxhash/
 * vlan metadata and hand the packet to the stack.  On skb allocation
 * failure the completion's buffers are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1353
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received page fragments directly to the napi frag-skb
 * (coalescing frags from the same physical page) and feed it to the
 * GRO engine.  Falls back to discarding the buffers if no frag-skb
 * is available.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps as u16) and is bumped to 0 on the first
	 * iteration before any frags[j] access */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken when HW verified the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1409
/* Decode a v1 (BE3-native) rx completion descriptor into the driver's
 * chip-independent be_rx_compl_info.  VLAN fields are only extracted
 * when the completion flags a tagged frame.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
1441
/* Decode a v0 (legacy) rx completion descriptor into the driver's
 * chip-independent be_rx_compl_info.  Mirrors be_parse_rx_compl_v1 but
 * uses the v0 field layout.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1473
/* Pop the next valid rx completion from the CQ, parse it into rxo->rxcp
 * (choosing the v1 or v0 layout from be3_native) and sanitize its VLAN
 * info.  Returns NULL when no completion is pending.  The rmb() after
 * the valid-bit test orders the descriptor reads against DMA writes.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BEx reports the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the port-vid tag unless the vid was explicitly added */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1513
1829b086 1514static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1515{
6b7c5b94 1516 u32 order = get_order(size);
1829b086 1517
6b7c5b94 1518 if (order > 0)
1829b086
ED
1519 gfp |= __GFP_COMP;
1520 return alloc_pages(gfp, order);
6b7c5b94
SP
1521}
1522
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Each big page is DMA-mapped once; subsequent
 * fragments from the same page take an extra page reference, and the
 * fragment that exhausts the page is flagged last_page_user so the
 * mapping is released when that fragment is consumed.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* carve the next fragment from the current page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* fill the rx descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* loop ended mid-page: the last posted frag owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1584
/* Pop the next valid tx completion from the CQ, byte-swap it in place and
 * clear its valid bit for reuse.  Returns NULL when none is pending.
 * The rmb() orders the descriptor reads against the DMA'd valid bit.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* mark consumed so this slot reads invalid next time around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1600
/* Reclaim one transmitted skb: walk its WRBs from txq->tail through
 * last_index, unmapping each data fragment (the header WRB carries no
 * mapping of its own; the first data WRB's unmap also covers the linear
 * header when present), then free the skb.  Returns the number of WRBs
 * consumed (including the header WRB) so the caller can credit txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;	/* only the first WRB maps the header */

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1632
/* Return the number of events in the event queue.  Consumes each pending
 * entry (clears evt for reuse) and advances the tail; the rmb() orders
 * the entry read against the DMA'd evt word.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;	/* no more pending events */

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1652
10ef9ab4 1653static int event_handle(struct be_eq_obj *eqo)
859b1e4e 1654{
10ef9ab4
SP
1655 bool rearm = false;
1656 int num = events_get(eqo);
859b1e4e 1657
10ef9ab4 1658 /* Deal with any spurious interrupts that come without events */
3c8def97
SP
1659 if (!num)
1660 rearm = true;
1661
af311fe3
PR
1662 if (num || msix_enabled(eqo->adapter))
1663 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1664
859b1e4e 1665 if (num)
10ef9ab4 1666 napi_schedule(&eqo->napi);
859b1e4e
SP
1667
1668 return num;
1669}
1670
10ef9ab4
SP
1671/* Leaves the EQ is disarmed state */
1672static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1673{
10ef9ab4 1674 int num = events_get(eqo);
859b1e4e 1675
10ef9ab4 1676 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1677}
1678
/* Flush an rx queue during teardown: drain and discard all pending rx
 * completions (acking each on the CQ), then release every still-posted
 * buffer that never produced a completion, and reset the queue indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest still-posted buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1703
/* Drain all tx queues during teardown.  Phase 1: poll each queue's CQ for
 * up to ~200ms, reclaiming completed skbs.  Phase 2: for anything still
 * outstanding (completions that will never arrive), walk the WRBs by skb
 * and free them directly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the CQ and credit the reclaimed WRBs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute this skb's WRB span to free it whole */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1762
10ef9ab4
SP
1763static void be_evt_queues_destroy(struct be_adapter *adapter)
1764{
1765 struct be_eq_obj *eqo;
1766 int i;
1767
1768 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1769 if (eqo->q.created) {
1770 be_eq_clean(eqo);
10ef9ab4 1771 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1772 }
10ef9ab4
SP
1773 be_queue_free(adapter, &eqo->q);
1774 }
1775}
1776
1777static int be_evt_queues_create(struct be_adapter *adapter)
1778{
1779 struct be_queue_info *eq;
1780 struct be_eq_obj *eqo;
1781 int i, rc;
1782
1783 adapter->num_evt_qs = num_irqs(adapter);
1784
1785 for_all_evt_queues(adapter, eqo, i) {
1786 eqo->adapter = adapter;
1787 eqo->tx_budget = BE_TX_BUDGET;
1788 eqo->idx = i;
1789 eqo->max_eqd = BE_MAX_EQD;
1790 eqo->enable_aic = true;
1791
1792 eq = &eqo->q;
1793 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1794 sizeof(struct be_eq_entry));
1795 if (rc)
1796 return rc;
1797
1798 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1799 if (rc)
1800 return rc;
1801 }
1cfafab9 1802 return 0;
10ef9ab4
SP
1803}
1804
5fb379ee
SP
/* Destroy the MCC (management command channel) queue and its completion
 * queue.  The MCCQ is destroyed before its CQ, mirroring the reverse of
 * the creation order in be_mcc_queues_create().
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1819
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and then the MCC queue itself.
 * Failure at any step unwinds the earlier steps via the goto chain
 * below (free MCCQ -> destroy CQ in FW -> free CQ).
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1852
6b7c5b94
SP
/* Destroy every TX queue and its completion queue.
 * For each TX object the TXQ is torn down before its CQ (reverse of
 * creation order); queues never created in FW are only freed in host
 * memory.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1871
dafc0fe3
SP
1872static int be_num_txqs_want(struct be_adapter *adapter)
1873{
abb93951
PR
1874 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1875 be_is_mc(adapter) ||
1876 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
39f1d94d 1877 adapter->generation == BE_GEN2)
dafc0fe3
SP
1878 return 1;
1879 else
abb93951 1880 return adapter->max_tx_queues;
dafc0fe3
SP
1881}
1882
10ef9ab4 1883static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1884{
10ef9ab4
SP
1885 struct be_queue_info *cq, *eq;
1886 int status;
3c8def97
SP
1887 struct be_tx_obj *txo;
1888 u8 i;
6b7c5b94 1889
dafc0fe3 1890 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1891 if (adapter->num_tx_qs != MAX_TX_QS) {
1892 rtnl_lock();
dafc0fe3
SP
1893 netif_set_real_num_tx_queues(adapter->netdev,
1894 adapter->num_tx_qs);
3bb62f4f
PR
1895 rtnl_unlock();
1896 }
dafc0fe3 1897
10ef9ab4
SP
1898 for_all_tx_queues(adapter, txo, i) {
1899 cq = &txo->cq;
1900 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1901 sizeof(struct be_eth_tx_compl));
1902 if (status)
1903 return status;
3c8def97 1904
10ef9ab4
SP
1905 /* If num_evt_qs is less than num_tx_qs, then more than
1906 * one txq share an eq
1907 */
1908 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1909 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1910 if (status)
1911 return status;
1912 }
1913 return 0;
1914}
6b7c5b94 1915
10ef9ab4
SP
/* Allocate and create each TX queue in FW.
 * Must be called after be_tx_cqs_create() since the FW TXQ creation
 * binds each TXQ to its already-created CQ.
 * Returns 0 on success or the first non-zero error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
1936
/* Destroy all RX completion queues in FW (when created) and free their
 * host memory.  The RX queues themselves are torn down separately in
 * be_rx_qs_destroy().
 */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1950
/* Allocate and create the RX completion queues: one per RSS ring plus a
 * default RX queue when more than one irq is available, or exactly one
 * queue otherwise.  CQs are spread round-robin over the event queues.
 * Returns 0 on success or the first non-zero error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: contiguous span used when carving rx fragments */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Multiple RX CQs may share one EQ when there are more
		 * RX queues than event queues
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
1989
6b7c5b94
SP
1990static irqreturn_t be_intx(int irq, void *dev)
1991{
1992 struct be_adapter *adapter = dev;
10ef9ab4 1993 int num_evts;
6b7c5b94 1994
10ef9ab4
SP
1995 /* With INTx only one EQ is used */
1996 num_evts = event_handle(&adapter->eq_obj[0]);
1997 if (num_evts)
1998 return IRQ_HANDLED;
1999 else
2000 return IRQ_NONE;
6b7c5b94
SP
2001}
2002
/* MSI-X interrupt handler: each vector is bound to exactly one event
 * queue object, so the handler always claims the interrupt.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}
2010
2e588f84 2011static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2012{
2e588f84 2013 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2014}
2015
10ef9ab4
SP
/* NAPI RX processing for one RX object.
 * Drains up to @budget completions from the RX CQ, dispatching each
 * packet either to GRO or to the regular receive path, and discarding
 * malformed/filtered completions.  Notifies the CQ of consumed entries
 * and replenishes RX fragments when the queue runs low.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every compl, including discards */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill posted RX fragments once below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2065
10ef9ab4
SP
/* Reap up to @budget TX completions for one TX object.
 * Frees completed wrbs, wakes the corresponding netdev subqueue when
 * enough descriptors have drained, and accounts completion stats.
 * @idx is the subqueue index for this TXQ.
 * Returns true when fewer than @budget completions were found (i.e. the
 * CQ is fully drained for now).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2098
10ef9ab4
SP
/* NAPI poll handler for one event queue.
 * Services every TXQ and RXQ mapped to this EQ (queues are striped over
 * EQs by index), plus MCC completions on the MCC EQ.  If any TXQ was not
 * fully drained, the full budget is reported so polling continues.
 * Returns the amount of work done; re-arms the EQ only when below budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2135
f67ef7ba 2136void be_detect_error(struct be_adapter *adapter)
7c185276 2137{
e1cfb67a
PR
2138 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2139 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2140 u32 i;
2141
f67ef7ba 2142 if (be_crit_error(adapter))
72f02485
SP
2143 return;
2144
e1cfb67a
PR
2145 if (lancer_chip(adapter)) {
2146 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2147 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2148 sliport_err1 = ioread32(adapter->db +
2149 SLIPORT_ERROR1_OFFSET);
2150 sliport_err2 = ioread32(adapter->db +
2151 SLIPORT_ERROR2_OFFSET);
2152 }
2153 } else {
2154 pci_read_config_dword(adapter->pdev,
2155 PCICFG_UE_STATUS_LOW, &ue_lo);
2156 pci_read_config_dword(adapter->pdev,
2157 PCICFG_UE_STATUS_HIGH, &ue_hi);
2158 pci_read_config_dword(adapter->pdev,
2159 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2160 pci_read_config_dword(adapter->pdev,
2161 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2162
f67ef7ba
PR
2163 ue_lo = (ue_lo & ~ue_lo_mask);
2164 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2165 }
7c185276 2166
1451ae6e
AK
2167 /* On certain platforms BE hardware can indicate spurious UEs.
2168 * Allow the h/w to stop working completely in case of a real UE.
2169 * Hence not setting the hw_error for UE detection.
2170 */
2171 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2172 adapter->hw_error = true;
434b3648 2173 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2174 "Error detected in the card\n");
2175 }
2176
2177 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2178 dev_err(&adapter->pdev->dev,
2179 "ERR: sliport status 0x%x\n", sliport_status);
2180 dev_err(&adapter->pdev->dev,
2181 "ERR: sliport error1 0x%x\n", sliport_err1);
2182 dev_err(&adapter->pdev->dev,
2183 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2184 }
2185
e1cfb67a
PR
2186 if (ue_lo) {
2187 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2188 if (ue_lo & 1)
7c185276
AK
2189 dev_err(&adapter->pdev->dev,
2190 "UE: %s bit set\n", ue_status_low_desc[i]);
2191 }
2192 }
f67ef7ba 2193
e1cfb67a
PR
2194 if (ue_hi) {
2195 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2196 if (ue_hi & 1)
7c185276
AK
2197 dev_err(&adapter->pdev->dev,
2198 "UE: %s bit set\n", ue_status_hi_desc[i]);
2199 }
2200 }
2201
2202}
2203
8d56ff11
SP
2204static void be_msix_disable(struct be_adapter *adapter)
2205{
ac6a0c4a 2206 if (msix_enabled(adapter)) {
8d56ff11 2207 pci_disable_msix(adapter->pdev);
ac6a0c4a 2208 adapter->num_msix_vec = 0;
3abcdeda
SP
2209 }
2210}
2211
10ef9ab4
SP
2212static uint be_num_rss_want(struct be_adapter *adapter)
2213{
30e80b55 2214 u32 num = 0;
abb93951 2215
10ef9ab4 2216 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2217 (lancer_chip(adapter) ||
2218 (!sriov_want(adapter) && be_physfn(adapter)))) {
2219 num = adapter->max_rss_queues;
30e80b55
YM
2220 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2221 }
2222 return num;
10ef9ab4
SP
2223}
2224
6b7c5b94
SP
/* Enable MSI-X with as many vectors as the NIC + (optional) RoCE need.
 * If the full request fails but the PCI core offers a smaller count,
 * retry with that count.  On total failure the adapter silently falls
 * back to INTx (num_msix_vec stays 0).  On success the vectors are
 * split between the NIC and the RoCE function.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors the platform
		 * can actually grant — retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Partition granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2272
/* Return the MSI-X vector number assigned to this event queue's index */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
6b7c5b94 2278
b628bde2
SP
/* Request one MSI-X irq per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the irqs already requested (walking backwards from
 * the failed index) and disables MSI-X so the caller can fall back to
 * INTx.  Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the irqs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2302
/* Register interrupt handlers: MSI-X when enabled, otherwise legacy
 * INTx.  A PF whose MSI-X registration fails falls back to INTx; a VF
 * cannot use INTx and returns the MSI-X error instead.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2330
/* Free whichever irq(s) be_irq_register() acquired: the shared INTx
 * line, or one MSI-X vector per event queue.  Safe to call when no isr
 * was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2353
/* Destroy all RX queues in FW, drain their completion queues, and free
 * host memory.  The 1ms delay between FW destroy and CQ clean is a
 * required DMA/flush grace period — do not remove it.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2374
889cd4b2
SP
/* ndo_stop: quiesce the interface.
 * Teardown order matters: stop RoCE and async MCC, mask interrupts,
 * disable NAPI and synchronize irqs per EQ, unregister irqs, then wait
 * for outstanding TX completions before destroying the RX queues.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer has no host-side interrupt-enable register */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2407
10ef9ab4 2408static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2409{
2410 struct be_rx_obj *rxo;
e9008ee9
PR
2411 int rc, i, j;
2412 u8 rsstable[128];
482c9e79
SP
2413
2414 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2415 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2416 sizeof(struct be_eth_rx_d));
2417 if (rc)
2418 return rc;
2419 }
2420
2421 /* The FW would like the default RXQ to be created first */
2422 rxo = default_rxo(adapter);
2423 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2424 adapter->if_handle, false, &rxo->rss_id);
2425 if (rc)
2426 return rc;
2427
2428 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2429 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2430 rx_frag_size, adapter->if_handle,
2431 true, &rxo->rss_id);
482c9e79
SP
2432 if (rc)
2433 return rc;
2434 }
2435
2436 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2437 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2438 for_all_rss_queues(adapter, rxo, i) {
2439 if ((j + i) >= 128)
2440 break;
2441 rsstable[j + i] = rxo->rss_id;
2442 }
2443 }
2444 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2445 if (rc)
2446 return rc;
2447 }
2448
2449 /* First time posting */
10ef9ab4 2450 for_all_rx_queues(adapter, rxo, i)
482c9e79 2451 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2452 return 0;
2453}
2454
6b7c5b94
SP
/* ndo_open: bring the interface up.
 * Bring-up order matters: create RX queues, register irqs, unmask
 * interrupts, notify all CQs, enable async MCC, then enable NAPI and
 * arm the EQs.  Link status is queried best-effort; failure to query is
 * not fatal.  On any setup error, be_close() unwinds and -EIO is
 * returned regardless of the underlying error code.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no host-side interrupt-enable register */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2496
71d8d1b5
AK
2497static int be_setup_wol(struct be_adapter *adapter, bool enable)
2498{
2499 struct be_dma_mem cmd;
2500 int status = 0;
2501 u8 mac[ETH_ALEN];
2502
2503 memset(mac, 0, ETH_ALEN);
2504
2505 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2506 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2507 GFP_KERNEL);
71d8d1b5
AK
2508 if (cmd.va == NULL)
2509 return -1;
2510 memset(cmd.va, 0, cmd.size);
2511
2512 if (enable) {
2513 status = pci_write_config_dword(adapter->pdev,
2514 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2515 if (status) {
2516 dev_err(&adapter->pdev->dev,
2381a55c 2517 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2518 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2519 cmd.dma);
71d8d1b5
AK
2520 return status;
2521 }
2522 status = be_cmd_enable_magic_wol(adapter,
2523 adapter->netdev->dev_addr, &cmd);
2524 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2525 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2526 } else {
2527 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2528 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2529 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2530 }
2531
2b7bcebf 2532 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2533 return status;
2534}
2535
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last FW command; on a per-VF failure the
 * error is logged and the loop continues with the next VF.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list command; BEx
		 * uses a pmac entry on the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address.  Note: only
		 * the last octet is incremented (wraps every 256 VFs).
		 */
		mac[5] += 1;
	}
	return status;
}
2570
/* Undo be_vf_setup(): remove per-VF MACs and interfaces and disable
 * SR-IOV.  If any VF is still assigned to a VM, FW/interface teardown
 * and pci_disable_sriov() are skipped — only the host-side vf_cfg state
 * is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears the mac-list; BEx deletes the pmac entry */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2595
a54769f5
SP
/* Full teardown of adapter resources (reverse of be_setup()):
 * stop the worker, clear VFs, delete extra unicast MACs, destroy the
 * interface, then tear down MCC/RX/TX/event queues and release MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC; extra uc macs start at index 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2625
abb93951
PR
2626static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2627 u32 *cap_flags, u8 domain)
2628{
2629 bool profile_present = false;
2630 int status;
2631
2632 if (lancer_chip(adapter)) {
2633 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2634 if (!status)
2635 profile_present = true;
2636 }
2637
2638 if (!profile_present)
2639 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2640 BE_IF_FLAGS_MULTICAST;
2641}
2642
/* Allocate the per-VF config array and mark every entry as "not yet
 * created" (if_handle/pmac_id of -1).  Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2659
f9449ab7
SP
/* Enable SR-IOV and provision each VF:
 * enable PCI SR-IOV, create a FW interface per VF, assign MAC addresses,
 * set QoS/link speed, record the default vlan and enable the VF.
 * If VFs are already enabled (e.g. left over from a previous load), the
 * module parameter is ignored and 0 is returned.  A platform-level
 * SR-IOV enable failure is also treated as non-fatal (returns 0).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the module-parameter request to what the device supports */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		/* Enable only the basic RX filters for a VF */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* Only assign MACs when the VFs were not already provisioned */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		/* tx_rate is kept in units of 100 Mbps (TODO confirm) */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	/* Caller is expected to unwind via be_vf_clear()/be_clear()
	 * (TODO confirm against be_setup()'s error path)
	 */
	return status;
}
2732
30128031
SP
/* Reset software state to pre-setup defaults before (re)running
 * be_setup().  -1 handles mean "not created"; command privileges depend
 * on whether this is the PF or a VF.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
2747
1578e777
PR
/* Obtain the MAC address for this function into @mac.
 * If the netdev already carries a permanent address, reuse it; otherwise
 * query FW: Lancer via the mac-list, BE3 PF via the permanent MAC, BE3
 * VF via the PF-assigned (soft) MAC.
 * @active_mac is set to tell the caller whether the returned MAC is
 * already programmed as active (so no pmac_add is needed); @pmac_id is
 * filled on the Lancer mac-list path.
 * Returns 0 on success or a FW command error.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* On a BEx VF the PF-assigned MAC is already active */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2782
abb93951
PR
/* Populate the adapter's per-function resource limits.
 * On Lancer, limits come from the FW function-config profile (then
 * clamped to driver maxima); on BEx, or when the profile query fails,
 * fixed chip-generation defaults are used instead.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Reserve one RX queue for the default (non-RSS) ring */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Cannot have more RSS rings than event queues */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* Flex10 partitions share the vlan table across channels */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2847
39f1d94d
SP
2848/* Routine to query per function resource limits */
2849static int be_get_config(struct be_adapter *adapter)
2850{
abb93951 2851 int pos, status;
39f1d94d
SP
2852 u16 dev_num_vfs;
2853
abb93951
PR
2854 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2855 &adapter->function_mode,
2856 &adapter->function_caps);
2857 if (status)
2858 goto err;
2859
2860 be_get_resources(adapter);
2861
2862 /* primary mac needs 1 pmac entry */
2863 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2864 sizeof(u32), GFP_KERNEL);
2865 if (!adapter->pmac_id) {
2866 status = -ENOMEM;
2867 goto err;
2868 }
2869
39f1d94d
SP
2870 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2871 if (pos) {
2872 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2873 &dev_num_vfs);
7c5a5242
VV
2874 if (!lancer_chip(adapter))
2875 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
39f1d94d
SP
2876 adapter->dev_num_vfs = dev_num_vfs;
2877 }
abb93951
PR
2878err:
2879 return status;
39f1d94d
SP
2880}
2881
5fb379ee
SP
2882static int be_setup(struct be_adapter *adapter)
2883{
39f1d94d 2884 struct device *dev = &adapter->pdev->dev;
abb93951 2885 u32 en_flags;
a54769f5 2886 u32 tx_fc, rx_fc;
10ef9ab4 2887 int status;
ba343c77 2888 u8 mac[ETH_ALEN];
1578e777 2889 bool active_mac;
ba343c77 2890
30128031 2891 be_setup_init(adapter);
6b7c5b94 2892
abb93951
PR
2893 if (!lancer_chip(adapter))
2894 be_cmd_req_native_mode(adapter);
39f1d94d 2895
abb93951
PR
2896 status = be_get_config(adapter);
2897 if (status)
2898 goto err;
73d540f2 2899
10ef9ab4
SP
2900 be_msix_enable(adapter);
2901
2902 status = be_evt_queues_create(adapter);
2903 if (status)
a54769f5 2904 goto err;
6b7c5b94 2905
10ef9ab4
SP
2906 status = be_tx_cqs_create(adapter);
2907 if (status)
2908 goto err;
2909
2910 status = be_rx_cqs_create(adapter);
2911 if (status)
a54769f5 2912 goto err;
6b7c5b94 2913
f9449ab7 2914 status = be_mcc_queues_create(adapter);
10ef9ab4 2915 if (status)
a54769f5 2916 goto err;
6b7c5b94 2917
f25b119c
PR
2918 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2919 /* In UMC mode FW does not return right privileges.
2920 * Override with correct privilege equivalent to PF.
2921 */
2922 if (be_is_mc(adapter))
2923 adapter->cmd_privileges = MAX_PRIVILEGES;
2924
f9449ab7
SP
2925 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2926 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 2927
abb93951 2928 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 2929 en_flags |= BE_IF_FLAGS_RSS;
1578e777 2930
abb93951 2931 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 2932
abb93951 2933 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 2934 &adapter->if_handle, 0);
5fb379ee 2935 if (status != 0)
a54769f5 2936 goto err;
6b7c5b94 2937
1578e777
PR
2938 memset(mac, 0, ETH_ALEN);
2939 active_mac = false;
2940 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2941 &active_mac, &adapter->pmac_id[0]);
2942 if (status != 0)
2943 goto err;
2944
2945 if (!active_mac) {
2946 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2947 &adapter->pmac_id[0], 0);
2948 if (status != 0)
2949 goto err;
2950 }
2951
2952 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2953 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2954 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 2955 }
0dffc83e 2956
10ef9ab4
SP
2957 status = be_tx_qs_create(adapter);
2958 if (status)
2959 goto err;
2960
04b71175 2961 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2962
1d1e9a46 2963 if (adapter->vlans_added)
10329df8 2964 be_vid_config(adapter);
7ab8b0b4 2965
a54769f5 2966 be_set_rx_mode(adapter->netdev);
5fb379ee 2967
ddc3f5cb 2968 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 2969
ddc3f5cb
AK
2970 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2971 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 2972 adapter->rx_fc);
2dc1deb6 2973
39f1d94d
SP
2974 if (be_physfn(adapter) && num_vfs) {
2975 if (adapter->dev_num_vfs)
2976 be_vf_setup(adapter);
2977 else
2978 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
2979 }
2980
f25b119c
PR
2981 status = be_cmd_get_phy_info(adapter);
2982 if (!status && be_pause_supported(adapter))
42f11cf2
AK
2983 adapter->phy.fc_autoneg = 1;
2984
191eb756
SP
2985 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2986 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 2987 return 0;
a54769f5
SP
2988err:
2989 be_clear(adapter);
2990 return status;
2991}
6b7c5b94 2992
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every event queue so RX/TX completions are
 * processed even with interrupts disabled (e.g. netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
3006
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie that marks the start of a flash section directory in a
 * UFI image (split across two 16-byte halves; no NUL terminators needed).
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3009
fa9a6fed 3010static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3011 const u8 *p, u32 img_start, int image_size,
3012 int hdr_size)
fa9a6fed
SB
3013{
3014 u32 crc_offset;
3015 u8 flashed_crc[4];
3016 int status;
3f0d4560
AK
3017
3018 crc_offset = hdr_size + img_start + image_size - 4;
3019
fa9a6fed 3020 p += crc_offset;
3f0d4560
AK
3021
3022 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3023 (image_size - 4));
fa9a6fed
SB
3024 if (status) {
3025 dev_err(&adapter->pdev->dev,
3026 "could not get crc from flash, not flashing redboot\n");
3027 return false;
3028 }
3029
3030 /*update redboot only if crc does not match*/
3031 if (!memcmp(flashed_crc, p, 4))
3032 return false;
3033 else
3034 return true;
fa9a6fed
SB
3035}
3036
306f1348
SP
3037static bool phy_flashing_required(struct be_adapter *adapter)
3038{
42f11cf2
AK
3039 return (adapter->phy.phy_type == TN_8022 &&
3040 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3041}
3042
c165541e
PR
3043static bool is_comp_in_ufi(struct be_adapter *adapter,
3044 struct flash_section_info *fsec, int type)
3045{
3046 int i = 0, img_type = 0;
3047 struct flash_section_info_g2 *fsec_g2 = NULL;
3048
3049 if (adapter->generation != BE_GEN3)
3050 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3051
3052 for (i = 0; i < MAX_FLASH_COMP; i++) {
3053 if (fsec_g2)
3054 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3055 else
3056 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3057
3058 if (img_type == type)
3059 return true;
3060 }
3061 return false;
3062
3063}
3064
3065struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3066 int header_size,
3067 const struct firmware *fw)
3068{
3069 struct flash_section_info *fsec = NULL;
3070 const u8 *p = fw->data;
3071
3072 p += header_size;
3073 while (p < (fw->data + fw->size)) {
3074 fsec = (struct flash_section_info *)p;
3075 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3076 return fsec;
3077 p += 32;
3078 }
3079 return NULL;
3080}
3081
773a2d7c
PR
3082static int be_flash(struct be_adapter *adapter, const u8 *img,
3083 struct be_dma_mem *flash_cmd, int optype, int img_size)
3084{
3085 u32 total_bytes = 0, flash_op, num_bytes = 0;
3086 int status = 0;
3087 struct be_cmd_write_flashrom *req = flash_cmd->va;
3088
3089 total_bytes = img_size;
3090 while (total_bytes) {
3091 num_bytes = min_t(u32, 32*1024, total_bytes);
3092
3093 total_bytes -= num_bytes;
3094
3095 if (!total_bytes) {
3096 if (optype == OPTYPE_PHY_FW)
3097 flash_op = FLASHROM_OPER_PHY_FLASH;
3098 else
3099 flash_op = FLASHROM_OPER_FLASH;
3100 } else {
3101 if (optype == OPTYPE_PHY_FW)
3102 flash_op = FLASHROM_OPER_PHY_SAVE;
3103 else
3104 flash_op = FLASHROM_OPER_SAVE;
3105 }
3106
be716446 3107 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3108 img += num_bytes;
3109 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3110 flash_op, num_bytes);
3111 if (status) {
3112 if (status == ILLEGAL_IOCTL_REQ &&
3113 optype == OPTYPE_PHY_FW)
3114 break;
3115 dev_err(&adapter->pdev->dev,
3116 "cmd to write to flash rom failed.\n");
3117 return status;
3118 }
3119 }
3120 return 0;
3121}
3122
3f0d4560 3123static int be_flash_data(struct be_adapter *adapter,
c165541e
PR
3124 const struct firmware *fw,
3125 struct be_dma_mem *flash_cmd,
3126 int num_of_images)
3f0d4560 3127
84517482 3128{
3f0d4560 3129 int status = 0, i, filehdr_size = 0;
c165541e 3130 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3131 const u8 *p = fw->data;
215faf9c 3132 const struct flash_comp *pflashcomp;
773a2d7c 3133 int num_comp, redboot;
c165541e
PR
3134 struct flash_section_info *fsec = NULL;
3135
3136 struct flash_comp gen3_flash_types[] = {
3137 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3138 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3139 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3140 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3141 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3142 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3143 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3144 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3145 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3146 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3147 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3148 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3149 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3150 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3151 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3152 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3153 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3154 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3155 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3156 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3157 };
c165541e
PR
3158
3159 struct flash_comp gen2_flash_types[] = {
3160 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3161 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3162 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3163 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3164 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3165 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3166 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3167 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3168 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3169 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3170 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3171 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3172 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3173 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3174 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3175 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3176 };
3177
3178 if (adapter->generation == BE_GEN3) {
3179 pflashcomp = gen3_flash_types;
3180 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3181 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3182 } else {
3183 pflashcomp = gen2_flash_types;
3184 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3185 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3186 }
c165541e
PR
3187 /* Get flash section info*/
3188 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3189 if (!fsec) {
3190 dev_err(&adapter->pdev->dev,
3191 "Invalid Cookie. UFI corrupted ?\n");
3192 return -1;
3193 }
9fe96934 3194 for (i = 0; i < num_comp; i++) {
c165541e 3195 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3196 continue;
c165541e
PR
3197
3198 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3199 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3200 continue;
3201
773a2d7c
PR
3202 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3203 !phy_flashing_required(adapter))
306f1348 3204 continue;
c165541e 3205
773a2d7c
PR
3206 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3207 redboot = be_flash_redboot(adapter, fw->data,
3208 pflashcomp[i].offset, pflashcomp[i].size,
3209 filehdr_size + img_hdrs_size);
3210 if (!redboot)
3211 continue;
3212 }
c165541e 3213
3f0d4560 3214 p = fw->data;
c165541e 3215 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3216 if (p + pflashcomp[i].size > fw->data + fw->size)
3217 return -1;
773a2d7c
PR
3218
3219 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3220 pflashcomp[i].size);
3221 if (status) {
3222 dev_err(&adapter->pdev->dev,
3223 "Flashing section type %d failed.\n",
3224 pflashcomp[i].img_type);
3225 return status;
84517482 3226 }
84517482 3227 }
84517482
AK
3228 return 0;
3229}
3230
773a2d7c
PR
3231static int be_flash_skyhawk(struct be_adapter *adapter,
3232 const struct firmware *fw,
3233 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3234{
773a2d7c
PR
3235 int status = 0, i, filehdr_size = 0;
3236 int img_offset, img_size, img_optype, redboot;
3237 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3238 const u8 *p = fw->data;
3239 struct flash_section_info *fsec = NULL;
3240
3241 filehdr_size = sizeof(struct flash_file_hdr_g3);
3242 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3243 if (!fsec) {
3244 dev_err(&adapter->pdev->dev,
3245 "Invalid Cookie. UFI corrupted ?\n");
3246 return -1;
3247 }
3248
3249 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3250 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3251 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3252
3253 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3254 case IMAGE_FIRMWARE_iSCSI:
3255 img_optype = OPTYPE_ISCSI_ACTIVE;
3256 break;
3257 case IMAGE_BOOT_CODE:
3258 img_optype = OPTYPE_REDBOOT;
3259 break;
3260 case IMAGE_OPTION_ROM_ISCSI:
3261 img_optype = OPTYPE_BIOS;
3262 break;
3263 case IMAGE_OPTION_ROM_PXE:
3264 img_optype = OPTYPE_PXE_BIOS;
3265 break;
3266 case IMAGE_OPTION_ROM_FCoE:
3267 img_optype = OPTYPE_FCOE_BIOS;
3268 break;
3269 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3270 img_optype = OPTYPE_ISCSI_BACKUP;
3271 break;
3272 case IMAGE_NCSI:
3273 img_optype = OPTYPE_NCSI_FW;
3274 break;
3275 default:
3276 continue;
3277 }
3278
3279 if (img_optype == OPTYPE_REDBOOT) {
3280 redboot = be_flash_redboot(adapter, fw->data,
3281 img_offset, img_size,
3282 filehdr_size + img_hdrs_size);
3283 if (!redboot)
3284 continue;
3285 }
3286
3287 p = fw->data;
3288 p += filehdr_size + img_offset + img_hdrs_size;
3289 if (p + img_size > fw->data + fw->size)
3290 return -1;
3291
3292 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3293 if (status) {
3294 dev_err(&adapter->pdev->dev,
3295 "Flashing section type %d failed.\n",
3296 fsec->fsec_entry[i].type);
3297 return status;
3298 }
3299 }
3300 return 0;
3f0d4560
AK
3301}
3302
f67ef7ba
PR
3303static int lancer_wait_idle(struct be_adapter *adapter)
3304{
3305#define SLIPORT_IDLE_TIMEOUT 30
3306 u32 reg_val;
3307 int status = 0, i;
3308
3309 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3310 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3311 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3312 break;
3313
3314 ssleep(1);
3315 }
3316
3317 if (i == SLIPORT_IDLE_TIMEOUT)
3318 status = -1;
3319
3320 return status;
3321}
3322
3323static int lancer_fw_reset(struct be_adapter *adapter)
3324{
3325 int status = 0;
3326
3327 status = lancer_wait_idle(adapter);
3328 if (status)
3329 return status;
3330
3331 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3332 PHYSDEV_CONTROL_OFFSET);
3333
3334 return status;
3335}
3336
485bf569
SN
3337static int lancer_fw_download(struct be_adapter *adapter,
3338 const struct firmware *fw)
84517482 3339{
485bf569
SN
3340#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3341#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3342 struct be_dma_mem flash_cmd;
485bf569
SN
3343 const u8 *data_ptr = NULL;
3344 u8 *dest_image_ptr = NULL;
3345 size_t image_size = 0;
3346 u32 chunk_size = 0;
3347 u32 data_written = 0;
3348 u32 offset = 0;
3349 int status = 0;
3350 u8 add_status = 0;
f67ef7ba 3351 u8 change_status;
84517482 3352
485bf569 3353 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3354 dev_err(&adapter->pdev->dev,
485bf569
SN
3355 "FW Image not properly aligned. "
3356 "Length must be 4 byte aligned.\n");
3357 status = -EINVAL;
3358 goto lancer_fw_exit;
d9efd2af
SB
3359 }
3360
485bf569
SN
3361 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3362 + LANCER_FW_DOWNLOAD_CHUNK;
3363 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3364 &flash_cmd.dma, GFP_KERNEL);
3365 if (!flash_cmd.va) {
3366 status = -ENOMEM;
3367 dev_err(&adapter->pdev->dev,
3368 "Memory allocation failure while flashing\n");
3369 goto lancer_fw_exit;
3370 }
84517482 3371
485bf569
SN
3372 dest_image_ptr = flash_cmd.va +
3373 sizeof(struct lancer_cmd_req_write_object);
3374 image_size = fw->size;
3375 data_ptr = fw->data;
3376
3377 while (image_size) {
3378 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3379
3380 /* Copy the image chunk content. */
3381 memcpy(dest_image_ptr, data_ptr, chunk_size);
3382
3383 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3384 chunk_size, offset,
3385 LANCER_FW_DOWNLOAD_LOCATION,
3386 &data_written, &change_status,
3387 &add_status);
485bf569
SN
3388 if (status)
3389 break;
3390
3391 offset += data_written;
3392 data_ptr += data_written;
3393 image_size -= data_written;
3394 }
3395
3396 if (!status) {
3397 /* Commit the FW written */
3398 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3399 0, offset,
3400 LANCER_FW_DOWNLOAD_LOCATION,
3401 &data_written, &change_status,
3402 &add_status);
485bf569
SN
3403 }
3404
3405 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3406 flash_cmd.dma);
3407 if (status) {
3408 dev_err(&adapter->pdev->dev,
3409 "Firmware load error. "
3410 "Status code: 0x%x Additional Status: 0x%x\n",
3411 status, add_status);
3412 goto lancer_fw_exit;
3413 }
3414
f67ef7ba
PR
3415 if (change_status == LANCER_FW_RESET_NEEDED) {
3416 status = lancer_fw_reset(adapter);
3417 if (status) {
3418 dev_err(&adapter->pdev->dev,
3419 "Adapter busy for FW reset.\n"
3420 "New FW will not be active.\n");
3421 goto lancer_fw_exit;
3422 }
3423 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3424 dev_err(&adapter->pdev->dev,
3425 "System reboot required for new FW"
3426 " to be active\n");
3427 }
3428
485bf569
SN
3429 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3430lancer_fw_exit:
3431 return status;
3432}
3433
773a2d7c
PR
3434static int be_get_ufi_gen(struct be_adapter *adapter,
3435 struct flash_file_hdr_g2 *fhdr)
3436{
3437 if (fhdr == NULL)
3438 goto be_get_ufi_exit;
3439
3440 if (adapter->generation == BE_GEN3) {
3441 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3442 return SH_HW;
3443 else if (!skyhawk_chip(adapter) && fhdr->build[0] == '3')
3444 return BE_GEN3;
3445 } else if (adapter->generation == BE_GEN2 && fhdr->build[0] == '2') {
3446 return BE_GEN2;
3447 }
3448
3449be_get_ufi_exit:
3450 dev_err(&adapter->pdev->dev,
3451 "UFI and Interface are not compatible for flashing\n");
3452 return -1;
3453}
3454
485bf569
SN
3455static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3456{
3457 struct flash_file_hdr_g2 *fhdr;
3458 struct flash_file_hdr_g3 *fhdr3;
3459 struct image_hdr *img_hdr_ptr = NULL;
3460 struct be_dma_mem flash_cmd;
3461 const u8 *p;
773a2d7c 3462 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3463
be716446 3464 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3465 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3466 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3467 if (!flash_cmd.va) {
3468 status = -ENOMEM;
3469 dev_err(&adapter->pdev->dev,
3470 "Memory allocation failure while flashing\n");
485bf569 3471 goto be_fw_exit;
84517482
AK
3472 }
3473
773a2d7c
PR
3474 p = fw->data;
3475 fhdr = (struct flash_file_hdr_g2 *)p;
3476
3477 ufi_type = be_get_ufi_gen(adapter, fhdr);
3478
3479 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3480 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3481 for (i = 0; i < num_imgs; i++) {
3482 img_hdr_ptr = (struct image_hdr *)(fw->data +
3483 (sizeof(struct flash_file_hdr_g3) +
3484 i * sizeof(struct image_hdr)));
3485 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3486 if (ufi_type == SH_HW)
3487 status = be_flash_skyhawk(adapter, fw,
3488 &flash_cmd, num_imgs);
3489 else if (ufi_type == BE_GEN3)
3490 status = be_flash_data(adapter, fw,
3491 &flash_cmd, num_imgs);
3f0d4560 3492 }
773a2d7c
PR
3493 }
3494
3495 if (ufi_type == BE_GEN2)
3f0d4560 3496 status = be_flash_data(adapter, fw, &flash_cmd, 0);
773a2d7c 3497 else if (ufi_type == -1)
3f0d4560 3498 status = -1;
84517482 3499
2b7bcebf
IV
3500 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3501 flash_cmd.dma);
84517482
AK
3502 if (status) {
3503 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3504 goto be_fw_exit;
84517482
AK
3505 }
3506
af901ca1 3507 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3508
485bf569
SN
3509be_fw_exit:
3510 return status;
3511}
3512
3513int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3514{
3515 const struct firmware *fw;
3516 int status;
3517
3518 if (!netif_running(adapter->netdev)) {
3519 dev_err(&adapter->pdev->dev,
3520 "Firmware load not allowed (interface is down)\n");
3521 return -1;
3522 }
3523
3524 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3525 if (status)
3526 goto fw_exit;
3527
3528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3529
3530 if (lancer_chip(adapter))
3531 status = lancer_fw_download(adapter, fw);
3532 else
3533 status = be_fw_download(adapter, fw);
3534
84517482
AK
3535fw_exit:
3536 release_firmware(fw);
3537 return status;
3538}
3539
e5686ad8 3540static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3541 .ndo_open = be_open,
3542 .ndo_stop = be_close,
3543 .ndo_start_xmit = be_xmit,
a54769f5 3544 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3545 .ndo_set_mac_address = be_mac_addr_set,
3546 .ndo_change_mtu = be_change_mtu,
ab1594e9 3547 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3548 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3549 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3550 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3551 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3552 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3553 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3554 .ndo_get_vf_config = be_get_vf_config,
3555#ifdef CONFIG_NET_POLL_CONTROLLER
3556 .ndo_poll_controller = be_netpoll,
3557#endif
6b7c5b94
SP
3558};
3559
3560static void be_netdev_init(struct net_device *netdev)
3561{
3562 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3563 struct be_eq_obj *eqo;
3abcdeda 3564 int i;
6b7c5b94 3565
6332c8d3 3566 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3567 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3568 NETIF_F_HW_VLAN_TX;
3569 if (be_multi_rxq(adapter))
3570 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3571
3572 netdev->features |= netdev->hw_features |
8b8ddc68 3573 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3574
eb8a50d9 3575 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3576 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3577
fbc13f01
AK
3578 netdev->priv_flags |= IFF_UNICAST_FLT;
3579
6b7c5b94
SP
3580 netdev->flags |= IFF_MULTICAST;
3581
b7e5887e 3582 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3583
10ef9ab4 3584 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3585
3586 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3587
10ef9ab4
SP
3588 for_all_evt_queues(adapter, eqo, i)
3589 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3590}
3591
3592static void be_unmap_pci_bars(struct be_adapter *adapter)
3593{
8788fdc2
SP
3594 if (adapter->csr)
3595 iounmap(adapter->csr);
3596 if (adapter->db)
3597 iounmap(adapter->db);
045508a8
PP
3598 if (adapter->roce_db.base)
3599 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3600}
3601
3602static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3603{
3604 struct pci_dev *pdev = adapter->pdev;
3605 u8 __iomem *addr;
3606
3607 addr = pci_iomap(pdev, 2, 0);
3608 if (addr == NULL)
3609 return -ENOMEM;
3610
3611 adapter->roce_db.base = addr;
3612 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3613 adapter->roce_db.size = 8192;
3614 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3615 return 0;
6b7c5b94
SP
3616}
3617
3618static int be_map_pci_bars(struct be_adapter *adapter)
3619{
3620 u8 __iomem *addr;
db3ea781 3621 int db_reg;
6b7c5b94 3622
fe6d2a38 3623 if (lancer_chip(adapter)) {
045508a8
PP
3624 if (be_type_2_3(adapter)) {
3625 addr = ioremap_nocache(
3626 pci_resource_start(adapter->pdev, 0),
3627 pci_resource_len(adapter->pdev, 0));
3628 if (addr == NULL)
3629 return -ENOMEM;
3630 adapter->db = addr;
3631 }
3632 if (adapter->if_type == SLI_INTF_TYPE_3) {
3633 if (lancer_roce_map_pci_bars(adapter))
3634 goto pci_map_err;
3635 }
fe6d2a38
SP
3636 return 0;
3637 }
3638
ba343c77
SB
3639 if (be_physfn(adapter)) {
3640 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3641 pci_resource_len(adapter->pdev, 2));
3642 if (addr == NULL)
3643 return -ENOMEM;
3644 adapter->csr = addr;
3645 }
6b7c5b94 3646
ba343c77 3647 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3648 db_reg = 4;
3649 } else {
ba343c77
SB
3650 if (be_physfn(adapter))
3651 db_reg = 4;
3652 else
3653 db_reg = 0;
3654 }
3655 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3656 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3657 if (addr == NULL)
3658 goto pci_map_err;
ba343c77 3659 adapter->db = addr;
045508a8
PP
3660 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3661 adapter->roce_db.size = 4096;
3662 adapter->roce_db.io_addr =
3663 pci_resource_start(adapter->pdev, db_reg);
3664 adapter->roce_db.total_size =
3665 pci_resource_len(adapter->pdev, db_reg);
3666 }
6b7c5b94
SP
3667 return 0;
3668pci_map_err:
3669 be_unmap_pci_bars(adapter);
3670 return -ENOMEM;
3671}
3672
6b7c5b94
SP
3673static void be_ctrl_cleanup(struct be_adapter *adapter)
3674{
8788fdc2 3675 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3676
3677 be_unmap_pci_bars(adapter);
3678
3679 if (mem->va)
2b7bcebf
IV
3680 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3681 mem->dma);
e7b909a6 3682
5b8821b7 3683 mem = &adapter->rx_filter;
e7b909a6 3684 if (mem->va)
2b7bcebf
IV
3685 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3686 mem->dma);
6b7c5b94
SP
3687}
3688
6b7c5b94
SP
3689static int be_ctrl_init(struct be_adapter *adapter)
3690{
8788fdc2
SP
3691 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3692 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3693 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3694 int status;
6b7c5b94
SP
3695
3696 status = be_map_pci_bars(adapter);
3697 if (status)
e7b909a6 3698 goto done;
6b7c5b94
SP
3699
3700 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3701 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3702 mbox_mem_alloc->size,
3703 &mbox_mem_alloc->dma,
3704 GFP_KERNEL);
6b7c5b94 3705 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3706 status = -ENOMEM;
3707 goto unmap_pci_bars;
6b7c5b94
SP
3708 }
3709 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3710 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3711 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3712 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3713
5b8821b7
SP
3714 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3715 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3716 &rx_filter->dma, GFP_KERNEL);
3717 if (rx_filter->va == NULL) {
e7b909a6
SP
3718 status = -ENOMEM;
3719 goto free_mbox;
3720 }
5b8821b7 3721 memset(rx_filter->va, 0, rx_filter->size);
2984961c 3722 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3723 spin_lock_init(&adapter->mcc_lock);
3724 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3725
dd131e76 3726 init_completion(&adapter->flash_compl);
cf588477 3727 pci_save_state(adapter->pdev);
6b7c5b94 3728 return 0;
e7b909a6
SP
3729
3730free_mbox:
2b7bcebf
IV
3731 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3732 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3733
3734unmap_pci_bars:
3735 be_unmap_pci_bars(adapter);
3736
3737done:
3738 return status;
6b7c5b94
SP
3739}
3740
3741static void be_stats_cleanup(struct be_adapter *adapter)
3742{
3abcdeda 3743 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3744
3745 if (cmd->va)
2b7bcebf
IV
3746 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3747 cmd->va, cmd->dma);
6b7c5b94
SP
3748}
3749
3750static int be_stats_init(struct be_adapter *adapter)
3751{
3abcdeda 3752 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3753
005d5696 3754 if (adapter->generation == BE_GEN2) {
89a88ab8 3755 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3756 } else {
3757 if (lancer_chip(adapter))
3758 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3759 else
3760 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3761 }
2b7bcebf
IV
3762 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3763 GFP_KERNEL);
6b7c5b94
SP
3764 if (cmd->va == NULL)
3765 return -1;
d291b9af 3766 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3767 return 0;
3768}
3769
3770static void __devexit be_remove(struct pci_dev *pdev)
3771{
3772 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3773
6b7c5b94
SP
3774 if (!adapter)
3775 return;
3776
045508a8
PP
3777 be_roce_dev_remove(adapter);
3778
f67ef7ba
PR
3779 cancel_delayed_work_sync(&adapter->func_recovery_work);
3780
6b7c5b94
SP
3781 unregister_netdev(adapter->netdev);
3782
5fb379ee
SP
3783 be_clear(adapter);
3784
bf99e50d
PR
3785 /* tell fw we're done with firing cmds */
3786 be_cmd_fw_clean(adapter);
3787
6b7c5b94
SP
3788 be_stats_cleanup(adapter);
3789
3790 be_ctrl_cleanup(adapter);
3791
d6b6d987
SP
3792 pci_disable_pcie_error_reporting(pdev);
3793
6b7c5b94
SP
3794 pci_set_drvdata(pdev, NULL);
3795 pci_release_regions(pdev);
3796 pci_disable_device(pdev);
3797
3798 free_netdev(adapter->netdev);
3799}
3800
4762f6ce
AK
3801bool be_is_wol_supported(struct be_adapter *adapter)
3802{
3803 return ((adapter->wol_cap & BE_WOL_CAP) &&
3804 !be_is_wol_excluded(adapter)) ? true : false;
3805}
3806
941a77d5
SK
3807u32 be_get_fw_log_level(struct be_adapter *adapter)
3808{
3809 struct be_dma_mem extfat_cmd;
3810 struct be_fat_conf_params *cfgs;
3811 int status;
3812 u32 level = 0;
3813 int j;
3814
f25b119c
PR
3815 if (lancer_chip(adapter))
3816 return 0;
3817
941a77d5
SK
3818 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3819 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3820 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3821 &extfat_cmd.dma);
3822
3823 if (!extfat_cmd.va) {
3824 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3825 __func__);
3826 goto err;
3827 }
3828
3829 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3830 if (!status) {
3831 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3832 sizeof(struct be_cmd_resp_hdr));
ac46a462 3833 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
3834 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3835 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3836 }
3837 }
3838 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3839 extfat_cmd.dma);
3840err:
3841 return level;
3842}
abb93951 3843
/* Fetch one-time configuration from the FW after it is ready:
 * controller attributes, WOL capability (with an exclusion-list fallback
 * when the query fails) and the FW log level used to seed msg_enable.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level messages only when FW logging is at/below the
	 * default level */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3872
39f1d94d 3873static int be_dev_type_check(struct be_adapter *adapter)
fe6d2a38
SP
3874{
3875 struct pci_dev *pdev = adapter->pdev;
3876 u32 sli_intf = 0, if_type;
3877
3878 switch (pdev->device) {
3879 case BE_DEVICE_ID1:
3880 case OC_DEVICE_ID1:
3881 adapter->generation = BE_GEN2;
3882 break;
3883 case BE_DEVICE_ID2:
3884 case OC_DEVICE_ID2:
3885 adapter->generation = BE_GEN3;
3886 break;
3887 case OC_DEVICE_ID3:
12f4d0a8 3888 case OC_DEVICE_ID4:
fe6d2a38 3889 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
045508a8
PP
3890 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3891 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38
SP
3892 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3893 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3894 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
045508a8
PP
3895 !be_type_2_3(adapter)) {
3896 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3897 return -EINVAL;
3898 }
3899 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3900 SLI_INTF_FAMILY_SHIFT);
3901 adapter->generation = BE_GEN3;
3902 break;
3903 case OC_DEVICE_ID5:
76b73530 3904 case OC_DEVICE_ID6:
045508a8
PP
3905 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3906 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
fe6d2a38
SP
3907 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3908 return -EINVAL;
3909 }
fe6d2a38
SP
3910 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3911 SLI_INTF_FAMILY_SHIFT);
3912 adapter->generation = BE_GEN3;
3913 break;
3914 default:
3915 adapter->generation = 0;
3916 }
39f1d94d
SP
3917
3918 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3919 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38
SP
3920 return 0;
3921}
3922
/* Recover a Lancer function after a SLIPORT/HW error: wait for the FW to
 * reach the ready state, tear the function down, clear the error flags,
 * set it up again and reopen the interface if it was running.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* error flags must be cleared before be_setup() fires FW cmds,
	 * or those cmds would be rejected */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* NOTE(review): failure is logged only when an EEH error is also
	 * pending — confirm this condition is intended */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3959
/* Periodic (1s) worker that polls for HW errors and, on Lancer, drives
 * SLIPORT recovery. Always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; do not interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* reattach only on successful recovery */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3987
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off the async stats command, periodically reads
 * die temperature, replenishes starved RX rings and updates EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats cmd only after the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* sample die temperature every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* refill RX rings that ran out of buffers under pressure */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4030
39f1d94d
SP
4031static bool be_reset_required(struct be_adapter *adapter)
4032{
d79c0a20 4033 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
39f1d94d
SP
4034}
4035
d379142b
SP
4036static char *mc_name(struct be_adapter *adapter)
4037{
4038 if (adapter->function_mode & FLEX10_MODE)
4039 return "FLEX10";
4040 else if (adapter->function_mode & VNIC_MODE)
4041 return "vNIC";
4042 else if (adapter->function_mode & UMC_ENABLED)
4043 return "UMC";
4044 else
4045 return "";
4046}
4047
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4052
/* PCI probe callback: enable the device, allocate the netdev/adapter,
 * bring the FW/control path up, create queues via be_setup() and register
 * the netdev. Error paths unwind in strict reverse order via the labels
 * at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER support is best-effort; failure is only logged */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* start the periodic error-recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4179
/* PM suspend callback: arm WOL if enabled, stop the recovery poller,
 * detach and close the netdev, tear the function down and put the PCI
 * device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4203
/* PM resume callback: re-enable the PCI device, re-init the FW command
 * path, rebuild the function with be_setup(), reopen the interface if it
 * was running, restart the recovery poller and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is ignored here — confirm
	 * a setup failure on resume is intentionally not propagated */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4240
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown callback: quiesce workers, detach the netdev and reset the
 * function so the HW stops all DMA before the system goes down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4260
/* EEH error_detected callback: flag the EEH error, stop the recovery
 * poller, detach/close the netdev and tear the function down. Returns
 * DISCONNECT on permanent failure, otherwise NEED_RESET.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* keeps be_func_recovery_task from starting SLIPORT recovery */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4300
/* EEH slot_reset callback: clear error state, re-enable the PCI device,
 * restore config space and wait for FW readiness. RECOVERED lets EEH
 * proceed to be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4325
/* EEH resume callback: re-init the FW command path, reset and rebuild the
 * function, reopen the interface if it was running, and restart the
 * recovery poller. Errors are logged only — EEH offers no failure return.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4362
/* PCI EEH (error recovery) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4368
/* PCI driver descriptor: probe/remove, legacy PM suspend/resume,
 * shutdown and EEH error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4379
4380static int __init be_init_module(void)
4381{
8e95a202
JP
4382 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4383 rx_frag_size != 2048) {
6b7c5b94
SP
4384 printk(KERN_WARNING DRV_NAME
4385 " : Module param rx_frag_size must be 2048/4096/8192."
4386 " Using 2048\n");
4387 rx_frag_size = 2048;
4388 }
6b7c5b94
SP
4389
4390 return pci_register_driver(&be_driver);
4391}
4392module_init(be_init_module);
4393
/* Module exit point: unregister the PCI driver; per-device teardown
 * happens through be_remove() */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);