be2net: Use new hash key
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
6b7c5b94
SP
23
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
2e588f84 30static ushort rx_frag_size = 2048;
ba343c77 31static unsigned int num_vfs;
2e588f84 32module_param(rx_frag_size, ushort, S_IRUGO);
ba343c77 33module_param(num_vfs, uint, S_IRUGO);
6b7c5b94 34MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
ba343c77 35MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 36
6b7c5b94 37static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 39 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 43 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
6b7c5b94
SP
44 { 0 }
45};
46MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: bit-position to hw-block-name map used when decoding
 * the low 32 bits of an Unrecoverable Error status register. */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: same decoding for the high 32 status bits. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 117
752961a1
SP
118/* Is BE in a multi-channel mode */
119static inline bool be_is_mc(struct be_adapter *adapter) {
120 return (adapter->function_mode & FLEX10_MODE ||
121 adapter->function_mode & VNIC_MODE ||
122 adapter->function_mode & UMC_ENABLED);
123}
124
6b7c5b94
SP
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
128 if (mem->va)
2b7bcebf
IV
129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
6b7c5b94
SP
131}
132
133static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134 u16 len, u16 entry_size)
135{
136 struct be_dma_mem *mem = &q->dma_mem;
137
138 memset(q, 0, sizeof(*q));
139 q->len = len;
140 q->entry_size = entry_size;
141 mem->size = len * entry_size;
2b7bcebf
IV
142 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143 GFP_KERNEL);
6b7c5b94
SP
144 if (!mem->va)
145 return -1;
146 memset(mem->va, 0, mem->size);
147 return 0;
148}
149
8788fdc2 150static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 151{
db3ea781 152 u32 reg, enabled;
5f0b849e 153
cf588477
SP
154 if (adapter->eeh_err)
155 return;
156
db3ea781
SP
157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158 &reg);
159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
5f0b849e 161 if (!enabled && enable)
6b7c5b94 162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 163 else if (enabled && !enable)
6b7c5b94 164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else
6b7c5b94 166 return;
5f0b849e 167
db3ea781
SP
168 pci_write_config_dword(adapter->pdev,
169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
170}
171
8788fdc2 172static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
173{
174 u32 val = 0;
175 val |= qid & DB_RQ_RING_ID_MASK;
176 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
177
178 wmb();
8788fdc2 179 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
180}
181
8788fdc2 182static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
183{
184 u32 val = 0;
185 val |= qid & DB_TXULP_RING_ID_MASK;
186 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
187
188 wmb();
8788fdc2 189 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
190}
191
8788fdc2 192static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
193 bool arm, bool clear_int, u16 num_popped)
194{
195 u32 val = 0;
196 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
197 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
199
200 if (adapter->eeh_err)
201 return;
202
6b7c5b94
SP
203 if (arm)
204 val |= 1 << DB_EQ_REARM_SHIFT;
205 if (clear_int)
206 val |= 1 << DB_EQ_CLR_SHIFT;
207 val |= 1 << DB_EQ_EVNT_SHIFT;
208 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 209 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
210}
211
8788fdc2 212void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
213{
214 u32 val = 0;
215 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
216 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
218
219 if (adapter->eeh_err)
220 return;
221
6b7c5b94
SP
222 if (arm)
223 val |= 1 << DB_CQ_REARM_SHIFT;
224 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 225 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
226}
227
6b7c5b94
SP
/*
 * ndo_set_mac_address handler: program a new unicast MAC on the interface.
 * The new pmac is added *before* the old one is deleted so the interface
 * is never left without a valid MAC filter.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* remember old pmac for deletion */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* query the MAC currently programmed for this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram the hw filter only if the address actually changed */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
259
89a88ab8
AK
/* Copy the BE2 (stats v0) hw-stats command response into the
 * generation-independent driver stats structure. */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counts per-port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
308
/* Copy the BE3 (stats v1) hw-stats command response into the
 * generation-independent driver stats structure. */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* unlike v0, v1 keeps jabber counts in the per-port stats block */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
353
005d5696
SX
/* Copy the Lancer pport-stats command response into the
 * generation-independent driver stats structure. */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* response is little-endian; convert in place before reading */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer reports a single fifo-overflow counter; it feeds both
	 * driver-side overflow stats */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 391
09c1c68f
SP
392static void accumulate_16bit_val(u32 *acc, u16 val)
393{
394#define lo(x) (x & 0xFFFF)
395#define hi(x) (x & 0xFFFF0000)
396 bool wrapped = val < lo(*acc);
397 u32 newacc = hi(*acc) + val;
398
399 if (wrapped)
400 newacc += 65536;
401 ACCESS_ONCE(*acc) = newacc;
402}
403
89a88ab8
AK
/* Dispatch to the stats-parsing routine matching the chip generation,
 * then fold the per-RX-queue erx drop counters into the driver stats. */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
428
ab1594e9
SP
/*
 * ndo_get_stats64 handler: aggregate per-queue soft counters (read under
 * u64_stats seqcount protection) and hw error counters into @stats.
 * Returns @stats as required by the ndo contract.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated stats midway */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
494
ea172a01 495void be_link_status_update(struct be_adapter *adapter, u32 link_status)
6b7c5b94 496{
6b7c5b94
SP
497 struct net_device *netdev = adapter->netdev;
498
ea172a01
SP
499 /* when link status changes, link speed must be re-queried from card */
500 adapter->link_speed = -1;
501 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
502 netif_carrier_on(netdev);
503 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
504 } else {
505 netif_carrier_off(netdev);
506 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
6b7c5b94 507 }
6b7c5b94
SP
508}
509
3c8def97 510static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 511 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 512{
3c8def97
SP
513 struct be_tx_stats *stats = tx_stats(txo);
514
ab1594e9 515 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
516 stats->tx_reqs++;
517 stats->tx_wrbs += wrb_cnt;
518 stats->tx_bytes += copied;
519 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 520 if (stopped)
ac124ff9 521 stats->tx_stops++;
ab1594e9 522 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
523}
524
525/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
526static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
527 bool *dummy)
6b7c5b94 528{
ebc8d2ab
DM
529 int cnt = (skb->len > skb->data_len);
530
531 cnt += skb_shinfo(skb)->nr_frags;
532
6b7c5b94
SP
533 /* to account for hdr wrb */
534 cnt++;
fe6d2a38
SP
535 if (lancer_chip(adapter) || !(cnt & 1)) {
536 *dummy = false;
537 } else {
6b7c5b94
SP
538 /* add a dummy to make it an even num */
539 cnt++;
540 *dummy = true;
fe6d2a38 541 }
6b7c5b94
SP
542 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
543 return cnt;
544}
545
546static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
547{
548 wrb->frag_pa_hi = upper_32_bits(addr);
549 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
550 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
551}
552
cc4ce020
SK
/* Fill the header WRB that precedes the data WRBs of a TX request:
 * checksum/LSO offload flags, vlan tag, WRB count and total length. */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally needs explicit csum flags set
		 * alongside LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
602
2b7bcebf 603static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
604 bool unmap_single)
605{
606 dma_addr_t dma;
607
608 be_dws_le_to_cpu(wrb, sizeof(*wrb));
609
610 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 611 if (wrb->frag_len) {
7101e111 612 if (unmap_single)
2b7bcebf
IV
613 dma_unmap_single(dev, dma, wrb->frag_len,
614 DMA_TO_DEVICE);
7101e111 615 else
2b7bcebf 616 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
617 }
618}
6b7c5b94 619
/*
 * DMA-map the skb (linear part + page frags) and fill the TX WRBs.
 * Returns the number of data bytes queued, or 0 on a mapping failure,
 * in which case all mappings made so far are undone and the queue head
 * is restored to where this request started.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);	/* header WRB is filled last */
	queue_head_inc(txq);
	map_head = txq->head;		/* rollback point for dma_err */

	if (skb->len > skb->data_len) {
		/* map the linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* pad to an even WRB count (see wrb_cnt_for_skb) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the queue and unmap everything mapped so far */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
685
/*
 * ndo_start_xmit handler: map the skb into WRBs, stop the subqueue if it
 * may not fit another maximal request, then ring the TX doorbell. On a
 * mapping failure the skb is dropped (still NETDEV_TX_OK per convention).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: rewind the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
725
726static int be_change_mtu(struct net_device *netdev, int new_mtu)
727{
728 struct be_adapter *adapter = netdev_priv(netdev);
729 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
730 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
731 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
732 dev_info(&adapter->pdev->dev,
733 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
734 BE_MIN_MTU,
735 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
736 return -EINVAL;
737 }
738 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
739 netdev->mtu, new_mtu);
740 netdev->mtu = new_mtu;
741 return 0;
742}
743
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		/* a VF is programmed with its single transparent vlan tag */
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vids configured: fall back to vlan promiscuous */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
782
6b7c5b94
SP
/* ndo_vlan_rx_add_vid handler: record @vid and reprogram the hw table. */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	/* only the PF programs the vlan table */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): add uses (max_vlans + 1) while remove and
	 * be_vid_config use max_vlans -- presumably to trigger the
	 * vlan-promiscuous switch-over exactly at the boundary; confirm */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
795
/* ndo_vlan_rx_kill_vid handler: clear @vid and reprogram the hw table. */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	/* only the PF programs the vlan table */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
809
a54769f5 810static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
811{
812 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 813
24307eef 814 if (netdev->flags & IFF_PROMISC) {
5b8821b7 815 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
816 adapter->promiscuous = true;
817 goto done;
6b7c5b94
SP
818 }
819
25985edc 820 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
821 if (adapter->promiscuous) {
822 adapter->promiscuous = false;
5b8821b7 823 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
824
825 if (adapter->vlans_added)
826 be_vid_config(adapter, false, 0);
6b7c5b94
SP
827 }
828
e7b909a6 829 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 830 if (netdev->flags & IFF_ALLMULTI ||
5b8821b7
SP
831 netdev_mc_count(netdev) > BE_MAX_MC) {
832 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 833 goto done;
6b7c5b94 834 }
6b7c5b94 835
5b8821b7 836 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
24307eef
SP
837done:
838 return;
6b7c5b94
SP
839}
840
ba343c77
SB
/* ndo_set_vf_mac handler: set the MAC address of VF @vf from the PF. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer programs the VF MAC via the mac-list command */
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* BEx: delete the old pmac, then add the new one.
		 * NOTE(review): the del status is overwritten by the add
		 * below, so a failed delete is silently ignored -- confirm
		 * this is intended */
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
872
64600ea5
AK
873static int be_get_vf_config(struct net_device *netdev, int vf,
874 struct ifla_vf_info *vi)
875{
876 struct be_adapter *adapter = netdev_priv(netdev);
877
878 if (!adapter->sriov_enabled)
879 return -EPERM;
880
881 if (vf >= num_vfs)
882 return -EINVAL;
883
884 vi->vf = vf;
e1d18735 885 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
1da87b7f 886 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
64600ea5
AK
887 vi->qos = 0;
888 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
889
890 return 0;
891}
892
1da87b7f
AK
/* ndo_set_vf_vlan handler: assign (or clear, when @vlan is 0) the
 * transparent vlan tag of VF @vf. The @qos argument is accepted but not
 * programmed anywhere in this function. */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
920
e1d18735
AK
921static int be_set_vf_tx_rate(struct net_device *netdev,
922 int vf, int rate)
923{
924 struct be_adapter *adapter = netdev_priv(netdev);
925 int status = 0;
926
927 if (!adapter->sriov_enabled)
928 return -EPERM;
929
930 if ((vf >= num_vfs) || (rate < 0))
931 return -EINVAL;
932
933 if (rate > 10000)
934 rate = 10000;
935
936 adapter->vf_cfg[vf].vf_tx_rate = rate;
856c4012 937 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
938
939 if (status)
940 dev_info(&adapter->pdev->dev,
941 "tx rate %d on VF %d failed\n", rate, vf);
942 return status;
943}
944
ac124ff9 945static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94 946{
ac124ff9
SP
947 struct be_eq_obj *rx_eq = &rxo->rx_eq;
948 struct be_rx_stats *stats = rx_stats(rxo);
4097f663 949 ulong now = jiffies;
ac124ff9 950 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
951 u64 pkts;
952 unsigned int start, eqd;
ac124ff9
SP
953
954 if (!rx_eq->enable_aic)
955 return;
6b7c5b94 956
4097f663 957 /* Wrapped around */
3abcdeda
SP
958 if (time_before(now, stats->rx_jiffies)) {
959 stats->rx_jiffies = now;
4097f663
SP
960 return;
961 }
6b7c5b94 962
ac124ff9
SP
963 /* Update once a second */
964 if (delta < HZ)
6b7c5b94
SP
965 return;
966
ab1594e9
SP
967 do {
968 start = u64_stats_fetch_begin_bh(&stats->sync);
969 pkts = stats->rx_pkts;
970 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
971
68c3e5a7 972 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 973 stats->rx_pkts_prev = pkts;
3abcdeda 974 stats->rx_jiffies = now;
ac124ff9
SP
975 eqd = stats->rx_pps / 110000;
976 eqd = eqd << 3;
977 if (eqd > rx_eq->max_eqd)
978 eqd = rx_eq->max_eqd;
979 if (eqd < rx_eq->min_eqd)
980 eqd = rx_eq->min_eqd;
981 if (eqd < 10)
982 eqd = 0;
983 if (eqd != rx_eq->cur_eqd) {
984 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
985 rx_eq->cur_eqd = eqd;
986 }
6b7c5b94
SP
987}
988
3abcdeda 989static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 990 struct be_rx_compl_info *rxcp)
4097f663 991{
ac124ff9 992 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 993
ab1594e9 994 u64_stats_update_begin(&stats->sync);
3abcdeda 995 stats->rx_compl++;
2e588f84 996 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 997 stats->rx_pkts++;
2e588f84 998 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 999 stats->rx_mcast_pkts++;
2e588f84 1000 if (rxcp->err)
ac124ff9 1001 stats->rx_compl_err++;
ab1594e9 1002 u64_stats_update_end(&stats->sync);
4097f663
SP
1003}
1004
2e588f84 1005static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1006{
19fad86f
PR
1007 /* L4 checksum is not reliable for non TCP/UDP packets.
1008 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1009 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1010 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1011}
1012
6b7c5b94 1013static struct be_rx_page_info *
3abcdeda
SP
1014get_rx_page_info(struct be_adapter *adapter,
1015 struct be_rx_obj *rxo,
1016 u16 frag_idx)
6b7c5b94
SP
1017{
1018 struct be_rx_page_info *rx_page_info;
3abcdeda 1019 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1020
3abcdeda 1021 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1022 BUG_ON(!rx_page_info->page);
1023
205859a2 1024 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1025 dma_unmap_page(&adapter->pdev->dev,
1026 dma_unmap_addr(rx_page_info, bus),
1027 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1028 rx_page_info->last_page_user = false;
1029 }
6b7c5b94
SP
1030
1031 atomic_dec(&rxq->used);
1032 return rx_page_info;
1033}
1034
1035/* Throwaway the data in the Rx completion */
1036static void be_rx_compl_discard(struct be_adapter *adapter,
3abcdeda 1037 struct be_rx_obj *rxo,
2e588f84 1038 struct be_rx_compl_info *rxcp)
6b7c5b94 1039{
3abcdeda 1040 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1041 struct be_rx_page_info *page_info;
2e588f84 1042 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1043
e80d9da6 1044 for (i = 0; i < num_rcvd; i++) {
2e588f84 1045 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
e80d9da6
PR
1046 put_page(page_info->page);
1047 memset(page_info, 0, sizeof(*page_info));
2e588f84 1048 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1049 }
1050}
1051
1052/*
1053 * skb_fill_rx_data forms a complete skb for an ether frame
1054 * indicated by rxcp.
1055 */
3abcdeda 1056static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
2e588f84 1057 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
6b7c5b94 1058{
3abcdeda 1059 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1060 struct be_rx_page_info *page_info;
2e588f84
SP
1061 u16 i, j;
1062 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1063 u8 *start;
6b7c5b94 1064
2e588f84 1065 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
6b7c5b94
SP
1066 start = page_address(page_info->page) + page_info->page_offset;
1067 prefetch(start);
1068
1069 /* Copy data in the first descriptor of this completion */
2e588f84 1070 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94
SP
1071
1072 /* Copy the header portion into skb_data */
2e588f84 1073 hdr_len = min(BE_HDR_LEN, curr_frag_len);
6b7c5b94
SP
1074 memcpy(skb->data, start, hdr_len);
1075 skb->len = curr_frag_len;
1076 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1077 /* Complete packet has now been moved to data */
1078 put_page(page_info->page);
1079 skb->data_len = 0;
1080 skb->tail += curr_frag_len;
1081 } else {
1082 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1083 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1084 skb_shinfo(skb)->frags[0].page_offset =
1085 page_info->page_offset + hdr_len;
9e903e08 1086 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1087 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1088 skb->truesize += rx_frag_size;
6b7c5b94
SP
1089 skb->tail += hdr_len;
1090 }
205859a2 1091 page_info->page = NULL;
6b7c5b94 1092
2e588f84
SP
1093 if (rxcp->pkt_size <= rx_frag_size) {
1094 BUG_ON(rxcp->num_rcvd != 1);
1095 return;
6b7c5b94
SP
1096 }
1097
1098 /* More frags present for this completion */
2e588f84
SP
1099 index_inc(&rxcp->rxq_idx, rxq->len);
1100 remaining = rxcp->pkt_size - curr_frag_len;
1101 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1102 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1103 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1104
bd46cb6c
AK
1105 /* Coalesce all frags from the same physical page in one slot */
1106 if (page_info->page_offset == 0) {
1107 /* Fresh page */
1108 j++;
b061b39e 1109 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1110 skb_shinfo(skb)->frags[j].page_offset =
1111 page_info->page_offset;
9e903e08 1112 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1113 skb_shinfo(skb)->nr_frags++;
1114 } else {
1115 put_page(page_info->page);
1116 }
1117
9e903e08 1118 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1119 skb->len += curr_frag_len;
1120 skb->data_len += curr_frag_len;
bdb28a97 1121 skb->truesize += rx_frag_size;
2e588f84
SP
1122 remaining -= curr_frag_len;
1123 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1124 page_info->page = NULL;
6b7c5b94 1125 }
bd46cb6c 1126 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1127}
1128
5be93b9a 1129/* Process the RX completion indicated by rxcp when GRO is disabled */
6b7c5b94 1130static void be_rx_compl_process(struct be_adapter *adapter,
3abcdeda 1131 struct be_rx_obj *rxo,
2e588f84 1132 struct be_rx_compl_info *rxcp)
6b7c5b94 1133{
6332c8d3 1134 struct net_device *netdev = adapter->netdev;
6b7c5b94 1135 struct sk_buff *skb;
89420424 1136
6332c8d3 1137 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
a058a632 1138 if (unlikely(!skb)) {
ac124ff9 1139 rx_stats(rxo)->rx_drops_no_skbs++;
3abcdeda 1140 be_rx_compl_discard(adapter, rxo, rxcp);
6b7c5b94
SP
1141 return;
1142 }
1143
2e588f84 1144 skb_fill_rx_data(adapter, rxo, skb, rxcp);
6b7c5b94 1145
6332c8d3 1146 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1147 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1148 else
1149 skb_checksum_none_assert(skb);
6b7c5b94 1150
6332c8d3 1151 skb->protocol = eth_type_trans(skb, netdev);
4b972914
AK
1152 if (adapter->netdev->features & NETIF_F_RXHASH)
1153 skb->rxhash = rxcp->rss_hash;
1154
6b7c5b94 1155
343e43c0 1156 if (rxcp->vlanf)
4c5102f9
AK
1157 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1158
1159 netif_receive_skb(skb);
6b7c5b94
SP
1160}
1161
5be93b9a
AK
1162/* Process the RX completion indicated by rxcp when GRO is enabled */
1163static void be_rx_compl_process_gro(struct be_adapter *adapter,
3abcdeda 1164 struct be_rx_obj *rxo,
2e588f84 1165 struct be_rx_compl_info *rxcp)
6b7c5b94
SP
1166{
1167 struct be_rx_page_info *page_info;
5be93b9a 1168 struct sk_buff *skb = NULL;
3abcdeda
SP
1169 struct be_queue_info *rxq = &rxo->q;
1170 struct be_eq_obj *eq_obj = &rxo->rx_eq;
2e588f84
SP
1171 u16 remaining, curr_frag_len;
1172 u16 i, j;
3968fa1e 1173
5be93b9a
AK
1174 skb = napi_get_frags(&eq_obj->napi);
1175 if (!skb) {
3abcdeda 1176 be_rx_compl_discard(adapter, rxo, rxcp);
5be93b9a
AK
1177 return;
1178 }
1179
2e588f84
SP
1180 remaining = rxcp->pkt_size;
1181 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1182 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
6b7c5b94
SP
1183
1184 curr_frag_len = min(remaining, rx_frag_size);
1185
bd46cb6c
AK
1186 /* Coalesce all frags from the same physical page in one slot */
1187 if (i == 0 || page_info->page_offset == 0) {
1188 /* First frag or Fresh page */
1189 j++;
b061b39e 1190 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1191 skb_shinfo(skb)->frags[j].page_offset =
1192 page_info->page_offset;
9e903e08 1193 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1194 } else {
1195 put_page(page_info->page);
1196 }
9e903e08 1197 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1198 skb->truesize += rx_frag_size;
bd46cb6c 1199 remaining -= curr_frag_len;
2e588f84 1200 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1201 memset(page_info, 0, sizeof(*page_info));
1202 }
bd46cb6c 1203 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1204
5be93b9a 1205 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1206 skb->len = rxcp->pkt_size;
1207 skb->data_len = rxcp->pkt_size;
5be93b9a 1208 skb->ip_summed = CHECKSUM_UNNECESSARY;
4b972914
AK
1209 if (adapter->netdev->features & NETIF_F_RXHASH)
1210 skb->rxhash = rxcp->rss_hash;
5be93b9a 1211
343e43c0 1212 if (rxcp->vlanf)
4c5102f9
AK
1213 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1214
1215 napi_gro_frags(&eq_obj->napi);
2e588f84
SP
1216}
1217
1218static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1219 struct be_eth_rx_compl *compl,
1220 struct be_rx_compl_info *rxcp)
1221{
1222 rxcp->pkt_size =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1224 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1225 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1226 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1227 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1228 rxcp->ip_csum =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1230 rxcp->l4_csum =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1232 rxcp->ipv6 =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1234 rxcp->rxq_idx =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1236 rxcp->num_rcvd =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1238 rxcp->pkt_type =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914
AK
1240 rxcp->rss_hash =
1241 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
15d72184
SP
1242 if (rxcp->vlanf) {
1243 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1244 compl);
1245 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1246 compl);
15d72184 1247 }
12004ae9 1248 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1249}
1250
1251static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1252 struct be_eth_rx_compl *compl,
1253 struct be_rx_compl_info *rxcp)
1254{
1255 rxcp->pkt_size =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1257 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1258 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1259 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1260 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1261 rxcp->ip_csum =
1262 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1263 rxcp->l4_csum =
1264 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1265 rxcp->ipv6 =
1266 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1267 rxcp->rxq_idx =
1268 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1269 rxcp->num_rcvd =
1270 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1271 rxcp->pkt_type =
1272 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914
AK
1273 rxcp->rss_hash =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
15d72184
SP
1275 if (rxcp->vlanf) {
1276 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1277 compl);
1278 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1279 compl);
15d72184 1280 }
12004ae9 1281 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1282}
1283
1284static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1285{
1286 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1287 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1288 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1289
2e588f84
SP
1290 /* For checking the valid bit it is Ok to use either definition as the
1291 * valid bit is at the same position in both v0 and v1 Rx compl */
1292 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1293 return NULL;
6b7c5b94 1294
2e588f84
SP
1295 rmb();
1296 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1297
2e588f84
SP
1298 if (adapter->be3_native)
1299 be_parse_rx_compl_v1(adapter, compl, rxcp);
1300 else
1301 be_parse_rx_compl_v0(adapter, compl, rxcp);
6b7c5b94 1302
15d72184
SP
1303 if (rxcp->vlanf) {
1304 /* vlanf could be wrongly set in some cards.
1305 * ignore if vtm is not set */
752961a1 1306 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1307 rxcp->vlanf = 0;
6b7c5b94 1308
15d72184 1309 if (!lancer_chip(adapter))
3c709f8f 1310 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1311
939cf306 1312 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1313 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1314 rxcp->vlanf = 0;
1315 }
2e588f84
SP
1316
1317 /* As the compl has been parsed, reset it; we wont touch it again */
1318 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1319
3abcdeda 1320 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1321 return rxcp;
1322}
1323
1829b086 1324static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1325{
6b7c5b94 1326 u32 order = get_order(size);
1829b086 1327
6b7c5b94 1328 if (order > 0)
1829b086
ED
1329 gfp |= __GFP_COMP;
1330 return alloc_pages(gfp, order);
6b7c5b94
SP
1331}
1332
1333/*
1334 * Allocate a page, split it to fragments of size rx_frag_size and post as
1335 * receive buffers to BE
1336 */
1829b086 1337static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1338{
3abcdeda
SP
1339 struct be_adapter *adapter = rxo->adapter;
1340 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
26d92f92 1341 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1342 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1343 struct page *pagep = NULL;
1344 struct be_eth_rx_d *rxd;
1345 u64 page_dmaaddr = 0, frag_dmaaddr;
1346 u32 posted, page_offset = 0;
1347
3abcdeda 1348 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1349 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1350 if (!pagep) {
1829b086 1351 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1352 if (unlikely(!pagep)) {
ac124ff9 1353 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1354 break;
1355 }
2b7bcebf
IV
1356 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1357 0, adapter->big_page_size,
1358 DMA_FROM_DEVICE);
6b7c5b94
SP
1359 page_info->page_offset = 0;
1360 } else {
1361 get_page(pagep);
1362 page_info->page_offset = page_offset + rx_frag_size;
1363 }
1364 page_offset = page_info->page_offset;
1365 page_info->page = pagep;
fac6da5b 1366 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1367 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1368
1369 rxd = queue_head_node(rxq);
1370 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1371 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1372
1373 /* Any space left in the current big page for another frag? */
1374 if ((page_offset + rx_frag_size + rx_frag_size) >
1375 adapter->big_page_size) {
1376 pagep = NULL;
1377 page_info->last_page_user = true;
1378 }
26d92f92
SP
1379
1380 prev_page_info = page_info;
1381 queue_head_inc(rxq);
6b7c5b94
SP
1382 page_info = &page_info_tbl[rxq->head];
1383 }
1384 if (pagep)
26d92f92 1385 prev_page_info->last_page_user = true;
6b7c5b94
SP
1386
1387 if (posted) {
6b7c5b94 1388 atomic_add(posted, &rxq->used);
8788fdc2 1389 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1390 } else if (atomic_read(&rxq->used) == 0) {
1391 /* Let be_worker replenish when memory is available */
3abcdeda 1392 rxo->rx_post_starved = true;
6b7c5b94 1393 }
6b7c5b94
SP
1394}
1395
5fb379ee 1396static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1397{
6b7c5b94
SP
1398 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1399
1400 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1401 return NULL;
1402
f3eb62d2 1403 rmb();
6b7c5b94
SP
1404 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1405
1406 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1407
1408 queue_tail_inc(tx_cq);
1409 return txcp;
1410}
1411
3c8def97
SP
1412static u16 be_tx_compl_process(struct be_adapter *adapter,
1413 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1414{
3c8def97 1415 struct be_queue_info *txq = &txo->q;
a73b796e 1416 struct be_eth_wrb *wrb;
3c8def97 1417 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1418 struct sk_buff *sent_skb;
ec43b1a6
SP
1419 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1420 bool unmap_skb_hdr = true;
6b7c5b94 1421
ec43b1a6 1422 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1423 BUG_ON(!sent_skb);
ec43b1a6
SP
1424 sent_skbs[txq->tail] = NULL;
1425
1426 /* skip header wrb */
a73b796e 1427 queue_tail_inc(txq);
6b7c5b94 1428
ec43b1a6 1429 do {
6b7c5b94 1430 cur_index = txq->tail;
a73b796e 1431 wrb = queue_tail_node(txq);
2b7bcebf
IV
1432 unmap_tx_frag(&adapter->pdev->dev, wrb,
1433 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1434 unmap_skb_hdr = false;
1435
6b7c5b94
SP
1436 num_wrbs++;
1437 queue_tail_inc(txq);
ec43b1a6 1438 } while (cur_index != last_index);
6b7c5b94 1439
6b7c5b94 1440 kfree_skb(sent_skb);
4d586b82 1441 return num_wrbs;
6b7c5b94
SP
1442}
1443
859b1e4e
SP
1444static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1445{
1446 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1447
1448 if (!eqe->evt)
1449 return NULL;
1450
f3eb62d2 1451 rmb();
859b1e4e
SP
1452 eqe->evt = le32_to_cpu(eqe->evt);
1453 queue_tail_inc(&eq_obj->q);
1454 return eqe;
1455}
1456
1457static int event_handle(struct be_adapter *adapter,
3c8def97
SP
1458 struct be_eq_obj *eq_obj,
1459 bool rearm)
859b1e4e
SP
1460{
1461 struct be_eq_entry *eqe;
1462 u16 num = 0;
1463
1464 while ((eqe = event_get(eq_obj)) != NULL) {
1465 eqe->evt = 0;
1466 num++;
1467 }
1468
1469 /* Deal with any spurious interrupts that come
1470 * without events
1471 */
3c8def97
SP
1472 if (!num)
1473 rearm = true;
1474
1475 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
859b1e4e
SP
1476 if (num)
1477 napi_schedule(&eq_obj->napi);
1478
1479 return num;
1480}
1481
1482/* Just read and notify events without processing them.
1483 * Used at the time of destroying event queues */
1484static void be_eq_clean(struct be_adapter *adapter,
1485 struct be_eq_obj *eq_obj)
1486{
1487 struct be_eq_entry *eqe;
1488 u16 num = 0;
1489
1490 while ((eqe = event_get(eq_obj)) != NULL) {
1491 eqe->evt = 0;
1492 num++;
1493 }
1494
1495 if (num)
1496 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1497}
1498
3abcdeda 1499static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94
SP
1500{
1501 struct be_rx_page_info *page_info;
3abcdeda
SP
1502 struct be_queue_info *rxq = &rxo->q;
1503 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1504 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1505 u16 tail;
1506
1507 /* First cleanup pending rx completions */
3abcdeda
SP
1508 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1509 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1510 be_cq_notify(adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1511 }
1512
1513 /* Then free posted rx buffer that were not used */
1514 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1515 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
3abcdeda 1516 page_info = get_rx_page_info(adapter, rxo, tail);
6b7c5b94
SP
1517 put_page(page_info->page);
1518 memset(page_info, 0, sizeof(*page_info));
1519 }
1520 BUG_ON(atomic_read(&rxq->used));
482c9e79 1521 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1522}
1523
3c8def97
SP
1524static void be_tx_compl_clean(struct be_adapter *adapter,
1525 struct be_tx_obj *txo)
6b7c5b94 1526{
3c8def97
SP
1527 struct be_queue_info *tx_cq = &txo->cq;
1528 struct be_queue_info *txq = &txo->q;
a8e9179a 1529 struct be_eth_tx_compl *txcp;
4d586b82 1530 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1531 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1532 struct sk_buff *sent_skb;
1533 bool dummy_wrb;
a8e9179a
SP
1534
1535 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1536 do {
1537 while ((txcp = be_tx_compl_get(tx_cq))) {
1538 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1539 wrb_index, txcp);
3c8def97 1540 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1541 cmpl++;
1542 }
1543 if (cmpl) {
1544 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1545 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1546 cmpl = 0;
4d586b82 1547 num_wrbs = 0;
a8e9179a
SP
1548 }
1549
1550 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1551 break;
1552
1553 mdelay(1);
1554 } while (true);
1555
1556 if (atomic_read(&txq->used))
1557 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1558 atomic_read(&txq->used));
b03388d6
SP
1559
1560 /* free posted tx for which compls will never arrive */
1561 while (atomic_read(&txq->used)) {
1562 sent_skb = sent_skbs[txq->tail];
1563 end_idx = txq->tail;
1564 index_adv(&end_idx,
fe6d2a38
SP
1565 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1566 txq->len);
3c8def97 1567 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1568 atomic_sub(num_wrbs, &txq->used);
b03388d6 1569 }
6b7c5b94
SP
1570}
1571
5fb379ee
SP
1572static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573{
1574 struct be_queue_info *q;
5fb379ee 1575
8788fdc2 1576 q = &adapter->mcc_obj.q;
5fb379ee 1577 if (q->created)
8788fdc2 1578 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1579 be_queue_free(adapter, q);
1580
8788fdc2 1581 q = &adapter->mcc_obj.cq;
5fb379ee 1582 if (q->created)
8788fdc2 1583 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1584 be_queue_free(adapter, q);
1585}
1586
1587/* Must be called only after TX qs are created as MCC shares TX EQ */
1588static int be_mcc_queues_create(struct be_adapter *adapter)
1589{
1590 struct be_queue_info *q, *cq;
5fb379ee
SP
1591
1592 /* Alloc MCC compl queue */
8788fdc2 1593 cq = &adapter->mcc_obj.cq;
5fb379ee 1594 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1595 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1596 goto err;
1597
1598 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1599 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1600 goto mcc_cq_free;
1601
1602 /* Alloc MCC queue */
8788fdc2 1603 q = &adapter->mcc_obj.q;
5fb379ee
SP
1604 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1605 goto mcc_cq_destroy;
1606
1607 /* Ask BE to create MCC queue */
8788fdc2 1608 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1609 goto mcc_q_free;
1610
1611 return 0;
1612
1613mcc_q_free:
1614 be_queue_free(adapter, q);
1615mcc_cq_destroy:
8788fdc2 1616 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1617mcc_cq_free:
1618 be_queue_free(adapter, cq);
1619err:
1620 return -1;
1621}
1622
6b7c5b94
SP
1623static void be_tx_queues_destroy(struct be_adapter *adapter)
1624{
1625 struct be_queue_info *q;
3c8def97
SP
1626 struct be_tx_obj *txo;
1627 u8 i;
6b7c5b94 1628
3c8def97
SP
1629 for_all_tx_queues(adapter, txo, i) {
1630 q = &txo->q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1633 be_queue_free(adapter, q);
6b7c5b94 1634
3c8def97
SP
1635 q = &txo->cq;
1636 if (q->created)
1637 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638 be_queue_free(adapter, q);
1639 }
6b7c5b94 1640
859b1e4e
SP
1641 /* Clear any residual events */
1642 be_eq_clean(adapter, &adapter->tx_eq);
1643
6b7c5b94
SP
1644 q = &adapter->tx_eq.q;
1645 if (q->created)
8788fdc2 1646 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1647 be_queue_free(adapter, q);
1648}
1649
dafc0fe3
SP
1650static int be_num_txqs_want(struct be_adapter *adapter)
1651{
1652 if ((num_vfs && adapter->sriov_enabled) ||
752961a1 1653 be_is_mc(adapter) ||
dafc0fe3
SP
1654 lancer_chip(adapter) || !be_physfn(adapter) ||
1655 adapter->generation == BE_GEN2)
1656 return 1;
1657 else
1658 return MAX_TX_QS;
1659}
1660
3c8def97 1661/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1662static int be_tx_queues_create(struct be_adapter *adapter)
1663{
1664 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1665 struct be_tx_obj *txo;
1666 u8 i;
6b7c5b94 1667
dafc0fe3 1668 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1669 if (adapter->num_tx_qs != MAX_TX_QS) {
1670 rtnl_lock();
dafc0fe3
SP
1671 netif_set_real_num_tx_queues(adapter->netdev,
1672 adapter->num_tx_qs);
3bb62f4f
PR
1673 rtnl_unlock();
1674 }
dafc0fe3 1675
6b7c5b94
SP
1676 adapter->tx_eq.max_eqd = 0;
1677 adapter->tx_eq.min_eqd = 0;
1678 adapter->tx_eq.cur_eqd = 96;
1679 adapter->tx_eq.enable_aic = false;
3c8def97 1680
6b7c5b94 1681 eq = &adapter->tx_eq.q;
3c8def97
SP
1682 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1683 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1684 return -1;
1685
8788fdc2 1686 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1687 goto err;
ecd62107 1688 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1689
3c8def97
SP
1690 for_all_tx_queues(adapter, txo, i) {
1691 cq = &txo->cq;
1692 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1693 sizeof(struct be_eth_tx_compl)))
3c8def97 1694 goto err;
6b7c5b94 1695
3c8def97
SP
1696 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1697 goto err;
6b7c5b94 1698
3c8def97
SP
1699 q = &txo->q;
1700 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1701 sizeof(struct be_eth_wrb)))
1702 goto err;
3c8def97 1703 }
6b7c5b94
SP
1704 return 0;
1705
3c8def97
SP
1706err:
1707 be_tx_queues_destroy(adapter);
6b7c5b94
SP
1708 return -1;
1709}
1710
1711static void be_rx_queues_destroy(struct be_adapter *adapter)
1712{
1713 struct be_queue_info *q;
3abcdeda
SP
1714 struct be_rx_obj *rxo;
1715 int i;
1716
1717 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1718 be_queue_free(adapter, &rxo->q);
3abcdeda
SP
1719
1720 q = &rxo->cq;
1721 if (q->created)
1722 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1723 be_queue_free(adapter, q);
1724
3abcdeda 1725 q = &rxo->rx_eq.q;
482c9e79 1726 if (q->created)
3abcdeda 1727 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1728 be_queue_free(adapter, q);
6b7c5b94 1729 }
6b7c5b94
SP
1730}
1731
ac6a0c4a
SP
1732static u32 be_num_rxqs_want(struct be_adapter *adapter)
1733{
c814fd36 1734 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
752961a1
SP
1735 !adapter->sriov_enabled && be_physfn(adapter) &&
1736 !be_is_mc(adapter)) {
ac6a0c4a
SP
1737 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1738 } else {
1739 dev_warn(&adapter->pdev->dev,
1740 "No support for multiple RX queues\n");
1741 return 1;
1742 }
1743}
1744
6b7c5b94
SP
1745static int be_rx_queues_create(struct be_adapter *adapter)
1746{
1747 struct be_queue_info *eq, *q, *cq;
3abcdeda
SP
1748 struct be_rx_obj *rxo;
1749 int rc, i;
6b7c5b94 1750
ac6a0c4a
SP
1751 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1752 msix_enabled(adapter) ?
1753 adapter->num_msix_vec - 1 : 1);
1754 if (adapter->num_rx_qs != MAX_RX_QS)
1755 dev_warn(&adapter->pdev->dev,
1756 "Can create only %d RX queues", adapter->num_rx_qs);
1757
6b7c5b94 1758 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1759 for_all_rx_queues(adapter, rxo, i) {
1760 rxo->adapter = adapter;
1761 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1762 rxo->rx_eq.enable_aic = true;
1763
1764 /* EQ */
1765 eq = &rxo->rx_eq.q;
1766 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1767 sizeof(struct be_eq_entry));
1768 if (rc)
1769 goto err;
1770
1771 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1772 if (rc)
1773 goto err;
1774
ecd62107 1775 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1776
3abcdeda
SP
1777 /* CQ */
1778 cq = &rxo->cq;
1779 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1780 sizeof(struct be_eth_rx_compl));
1781 if (rc)
1782 goto err;
1783
1784 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1785 if (rc)
1786 goto err;
482c9e79
SP
1787
1788 /* Rx Q - will be created in be_open() */
3abcdeda
SP
1789 q = &rxo->q;
1790 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1791 sizeof(struct be_eth_rx_d));
1792 if (rc)
1793 goto err;
1794
3abcdeda 1795 }
6b7c5b94
SP
1796
1797 return 0;
3abcdeda
SP
1798err:
1799 be_rx_queues_destroy(adapter);
1800 return -1;
6b7c5b94 1801}
6b7c5b94 1802
fe6d2a38 1803static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1804{
fe6d2a38
SP
1805 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1806 if (!eqe->evt)
1807 return false;
1808 else
1809 return true;
b628bde2
SP
1810}
1811
6b7c5b94
SP
/* Legacy INTx interrupt handler shared by all event queues.
 * Lancer has no CEV_ISR register, so pending work is detected by
 * peeking at each EQ directly; on BE the CEV_ISR register indicates
 * which EQs fired.  Returns IRQ_NONE when the interrupt was not ours
 * (the line is shared).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		/* Neither TX/MCC nor any RX EQ had an event pending */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* Each EQ owns one bit in the ISR, indexed by eq_idx */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1846
1847static irqreturn_t be_msix_rx(int irq, void *dev)
1848{
3abcdeda
SP
1849 struct be_rx_obj *rxo = dev;
1850 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1851
3c8def97 1852 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1853
1854 return IRQ_HANDLED;
1855}
1856
5fb379ee 1857static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1858{
1859 struct be_adapter *adapter = dev;
1860
3c8def97 1861 event_handle(adapter, &adapter->tx_eq, false);
6b7c5b94
SP
1862
1863 return IRQ_HANDLED;
1864}
1865
2e588f84 1866static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1867{
2e588f84 1868 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1869}
1870
/* NAPI poll handler for one RX queue.  Processes up to @budget
 * completions, refills the RX ring when it runs below the watermark,
 * and re-arms the CQ only after all pending work was consumed.
 * Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Acknowledge the consumed completions without re-arming yet */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1927
f31e50a8
SP
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		/* Reap every completion currently on this TX queue's CQ */
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Re-arm the shared TX/MCC EQ; budget is deliberately ignored */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
1982
/* Detect an Unrecoverable Error (UE) in the adapter and dump the
 * offending status bits.  On Lancer the SLIPORT status registers are
 * read; on BE the PCI-config UE status registers, filtered through the
 * corresponding mask registers, are used.  Once a UE is detected the
 * adapter is marked dead (ue_detected/eeh_err) and never re-checked.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already dead or under EEH recovery - nothing more to report */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are not real errors - clear them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Name each asserted UE bit using the descriptor tables */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2046
8d56ff11
SP
2047static void be_msix_disable(struct be_adapter *adapter)
2048{
ac6a0c4a 2049 if (msix_enabled(adapter)) {
8d56ff11 2050 pci_disable_msix(adapter->pdev);
ac6a0c4a 2051 adapter->num_msix_vec = 0;
3abcdeda
SP
2052 }
2053}
2054
6b7c5b94
SP
/* Try to enable MSI-X with one vector per desired RX queue plus one
 * for TX/MCC.  pci_enable_msix() returns a positive value when fewer
 * vectors are available, in which case a single retry with that count
 * is made (as long as it meets the Rx+Tx minimum).  On success
 * num_msix_vec records the count; on failure it stays 0 and the driver
 * falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the kernel can give us */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2079
/* Enable SR-IOV when this function is a PF and the num_vfs module
 * parameter is non-zero.  num_vfs is clamped to the TotalVFs value
 * advertised in the device's SR-IOV extended capability.  The result
 * of pci_enable_sriov() is recorded in adapter->sriov_enabled rather
 * than returned; only per-VF config allocation failure returns an
 * error (-ENOMEM).
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2114
/* Disable SR-IOV and release per-VF config state; no-op when SR-IOV
 * is off or not compiled in.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->sriov_enabled = false;
#endif
}
2125
fe6d2a38
SP
2126static inline int be_msix_vec_get(struct be_adapter *adapter,
2127 struct be_eq_obj *eq_obj)
6b7c5b94 2128{
ecd62107 2129 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2130}
2131
b628bde2
SP
/* Build a unique IRQ description ("<ifname>-<desc>") for this EQ and
 * request its MSI-X vector.  Returns request_irq()'s status.
 * NOTE(review): sprintf() assumes eq_obj->desc is large enough to hold
 * netdev->name plus desc - confirm against the struct definition.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2143
3abcdeda
SP
/* Release the MSI-X vector that was requested for this EQ. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
6b7c5b94 2150
b628bde2
SP
/* Request one IRQ for the TX/MCC EQ and one per RX EQ.  On failure,
 * every IRQ acquired so far is released (in reverse order) and MSI-X
 * is disabled so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* Free only the RX IRQs acquired before the one that failed */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2184
/* Register interrupt handlers: prefer MSI-X, fall back to shared INTx
 * on a PF.  VFs support only MSI-X, so no INTx fallback is attempted
 * for them.  Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2212
/* Release whichever IRQ(s) be_irq_register() acquired (INTx or all
 * MSI-X vectors) and clear isr_registered.  Safe to call when nothing
 * was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2237
482c9e79
SP
/* Destroy the RX rings in hardware and drain any data or events still
 * pending on them.  The queue memory itself is freed later by
 * be_rx_queues_destroy().
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2262
889cd4b2
SP
/* Bring the interface down: quiesce async MCC, mask interrupts, stop
 * NAPI, un-arm CQs (Lancer), quiesce and free IRQs, flush pending TX
 * completions, and finally clean the RX queues.  Reverses be_open().
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* Un-arm all CQs so no further completions raise events */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Make sure no handler is still running before unregistering */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2311
/* Create the RX rings in hardware (RSS enabled on every queue except
 * the default queue 0), program the RSS indirection table when more
 * than one RX queue exists, then post the initial buffers and enable
 * NAPI on each queue.  Returns 0 or the first fw-cmd error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2344
6b7c5b94
SP
/* Bring the interface up: set up RX queues, enable NAPI, register
 * IRQs, arm all event/completion queues and finally enable async MCC
 * processing.  On failure be_close() undoes the partial setup.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2378
71d8d1b5
AK
/* Enable or disable Wake-on-LAN (magic packet).  A DMA-coherent buffer
 * is allocated for the fw command and freed before return.  When
 * enabling, PM control is programmed first and PCI wake is armed for
 * D3hot/D3cold; disabling programs a zeroed MAC and disarms PCI wake.
 * Returns 0 on success, -1 on allocation failure, or a fw/PCI status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2417
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * NOTE: only the status of the last VF's assignment is returned;
 * earlier failures are logged but otherwise dropped.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive MAC address */
		mac[5] += 1;
	}
	return status;
}
2452
/* Tear down per-VF state: remove each VF's programmed MAC (or clear
 * the mac list on Lancer), then destroy each VF's interface handle.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}
2470
a54769f5
SP
/* Undo be_setup(): release VF resources (PF only), destroy the
 * interface and all queues, then tell fw no further cmds will follow.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2486
30128031
SP
2487static void be_vf_setup_init(struct be_adapter *adapter)
2488{
2489 int vf;
2490
2491 for (vf = 0; vf < num_vfs; vf++) {
2492 adapter->vf_cfg[vf].vf_if_handle = -1;
2493 adapter->vf_cfg[vf].vf_pmac_id = -1;
2494 }
2495}
2496
f9449ab7
SP
/* Create an interface per VF, assign MAC addresses and cache each VF's
 * link speed.  Partial setup is not unwound here; the caller
 * (be_setup) invokes be_clear() on failure.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		/* presumably lnk_speed is in 10 Mbps units - TODO confirm
		 * against be_cmd_link_status_query() */
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2531
30128031
SP
2532static void be_setup_init(struct be_adapter *adapter)
2533{
2534 adapter->vlan_prio_bmap = 0xff;
2535 adapter->link_speed = -1;
2536 adapter->if_handle = -1;
2537 adapter->be3_native = false;
2538 adapter->promiscuous = false;
2539 adapter->eq_next_idx = 0;
2540}
2541
590c391d
PR
2542static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2543{
2544 u32 pmac_id;
2545 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2546 if (status != 0)
2547 goto do_none;
2548 status = be_cmd_mac_addr_query(adapter, mac,
2549 MAC_ADDRESS_TYPE_NETWORK,
2550 false, adapter->if_handle, pmac_id);
2551 if (status != 0)
2552 goto do_none;
2553 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2554 &adapter->pmac_id, 0);
2555do_none:
2556 return status;
2557}
2558
5fb379ee
SP
2559static int be_setup(struct be_adapter *adapter)
2560{
5fb379ee 2561 struct net_device *netdev = adapter->netdev;
f9449ab7 2562 u32 cap_flags, en_flags;
a54769f5 2563 u32 tx_fc, rx_fc;
293c4a7d 2564 int status, i;
ba343c77 2565 u8 mac[ETH_ALEN];
293c4a7d 2566 struct be_tx_obj *txo;
ba343c77 2567
30128031 2568 be_setup_init(adapter);
6b7c5b94 2569
f9449ab7 2570 be_cmd_req_native_mode(adapter);
73d540f2 2571
f9449ab7 2572 status = be_tx_queues_create(adapter);
6b7c5b94 2573 if (status != 0)
a54769f5 2574 goto err;
6b7c5b94 2575
f9449ab7 2576 status = be_rx_queues_create(adapter);
6b7c5b94 2577 if (status != 0)
a54769f5 2578 goto err;
6b7c5b94 2579
f9449ab7 2580 status = be_mcc_queues_create(adapter);
6b7c5b94 2581 if (status != 0)
a54769f5 2582 goto err;
6b7c5b94 2583
f9449ab7
SP
2584 memset(mac, 0, ETH_ALEN);
2585 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2586 true /*permanent */, 0, 0);
f9449ab7
SP
2587 if (status)
2588 return status;
2589 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2590 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2591
f9449ab7
SP
2592 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2593 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2594 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
5d5adb93
PR
2595 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2596
f9449ab7
SP
2597 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2598 cap_flags |= BE_IF_FLAGS_RSS;
2599 en_flags |= BE_IF_FLAGS_RSS;
2600 }
2601 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2602 netdev->dev_addr, &adapter->if_handle,
2603 &adapter->pmac_id, 0);
5fb379ee 2604 if (status != 0)
a54769f5 2605 goto err;
6b7c5b94 2606
293c4a7d
PR
2607 for_all_tx_queues(adapter, txo, i) {
2608 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2609 if (status)
2610 goto err;
2611 }
2612
590c391d
PR
2613 /* The VF's permanent mac queried from card is incorrect.
2614 * For BEx: Query the mac configued by the PF using if_handle
2615 * For Lancer: Get and use mac_list to obtain mac address.
2616 */
2617 if (!be_physfn(adapter)) {
2618 if (lancer_chip(adapter))
2619 status = be_configure_mac_from_list(adapter, mac);
2620 else
2621 status = be_cmd_mac_addr_query(adapter, mac,
2622 MAC_ADDRESS_TYPE_NETWORK, false,
2623 adapter->if_handle, 0);
f9449ab7
SP
2624 if (!status) {
2625 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2626 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2627 }
2628 }
0dffc83e 2629
04b71175 2630 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2631
a54769f5
SP
2632 status = be_vid_config(adapter, false, 0);
2633 if (status)
2634 goto err;
7ab8b0b4 2635
a54769f5 2636 be_set_rx_mode(adapter->netdev);
5fb379ee 2637
a54769f5 2638 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d
PR
2639 /* For Lancer: It is legal for this cmd to fail on VF */
2640 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2641 goto err;
590c391d 2642
a54769f5
SP
2643 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2644 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2645 adapter->rx_fc);
590c391d
PR
2646 /* For Lancer: It is legal for this cmd to fail on VF */
2647 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5
SP
2648 goto err;
2649 }
2dc1deb6 2650
a54769f5 2651 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2652
f9449ab7
SP
2653 if (be_physfn(adapter) && adapter->sriov_enabled) {
2654 status = be_vf_setup(adapter);
2655 if (status)
2656 goto err;
2657 }
2658
2659 return 0;
a54769f5
SP
2660err:
2661 be_clear(adapter);
2662 return status;
2663}
6b7c5b94 2664
84517482 2665#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2666static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2667 const u8 *p, u32 img_start, int image_size,
2668 int hdr_size)
fa9a6fed
SB
2669{
2670 u32 crc_offset;
2671 u8 flashed_crc[4];
2672 int status;
3f0d4560
AK
2673
2674 crc_offset = hdr_size + img_start + image_size - 4;
2675
fa9a6fed 2676 p += crc_offset;
3f0d4560
AK
2677
2678 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2679 (image_size - 4));
fa9a6fed
SB
2680 if (status) {
2681 dev_err(&adapter->pdev->dev,
2682 "could not get crc from flash, not flashing redboot\n");
2683 return false;
2684 }
2685
2686 /*update redboot only if crc does not match*/
2687 if (!memcmp(flashed_crc, p, 4))
2688 return false;
2689 else
2690 return true;
fa9a6fed
SB
2691}
2692
306f1348
SP
2693static bool phy_flashing_required(struct be_adapter *adapter)
2694{
2695 int status = 0;
2696 struct be_phy_info phy_info;
2697
2698 status = be_cmd_get_phy_info(adapter, &phy_info);
2699 if (status)
2700 return false;
2701 if ((phy_info.phy_type == TN_8022) &&
2702 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2703 return true;
2704 }
2705 return false;
2706}
2707
/* Flash each firmware component found in the UFI image @fw.  The
 * component layout (flash offset / image type / max size) differs
 * between BE2 (gen2) and BE3 (gen3) controllers.  Data is pushed to
 * the fw 32KB at a time through @flash_cmd; intermediate chunks use a
 * SAVE op and the final chunk a FLASH (commit) op.  Returns 0 on
 * success, -1 on bounds or flash-write errors.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw is only flashed for fw versions >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Skip redboot unless its CRC differs from what's flashed */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* Bounds-check the component against the fw blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Last chunk commits (FLASH); earlier chunks SAVE */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* Older fw may reject PHY fw flashing; skip
				 * the component rather than failing */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2824
3f0d4560
AK
2825static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2826{
2827 if (fhdr == NULL)
2828 return 0;
2829 if (fhdr->build[0] == '3')
2830 return BE_GEN3;
2831 else if (fhdr->build[0] == '2')
2832 return BE_GEN2;
2833 else
2834 return 0;
2835}
2836
485bf569
SN
/* Flash a Lancer (SLI-4) firmware image by streaming it to the "/prg"
 * flash object in 32KB chunks via WRITE_OBJECT cmds, then committing
 * with a zero-length write. Returns 0 on success, -EINVAL for a
 * misaligned image, -ENOMEM on allocation failure, or the cmd status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* fw requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* fw may accept fewer bytes than requested; advance by what
		 * was actually written
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write flags EOF */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2915
/* Flash a BE2/BE3 UFI firmware file. The UFI generation embedded in
 * the file header must match the adapter generation. Gen3 UFIs carry
 * multiple image headers; only image-id 1 is flashed here.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for one WRITE_FLASHROM cmd plus a 32KB payload chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* only the image-id 1 entry is flashable here */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2971
2972int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2973{
2974 const struct firmware *fw;
2975 int status;
2976
2977 if (!netif_running(adapter->netdev)) {
2978 dev_err(&adapter->pdev->dev,
2979 "Firmware load not allowed (interface is down)\n");
2980 return -1;
2981 }
2982
2983 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2984 if (status)
2985 goto fw_exit;
2986
2987 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2988
2989 if (lancer_chip(adapter))
2990 status = lancer_fw_download(adapter, fw);
2991 else
2992 status = be_fw_download(adapter, fw);
2993
84517482
AK
2994fw_exit:
2995 release_firmware(fw);
2996 return status;
2997}
2998
6b7c5b94
SP
/* netdev callbacks handed to the net core; the ndo_set_vf_* /
 * ndo_get_vf_config SR-IOV ops are meaningful only on the PF.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
3015
/* One-time netdev setup: offload feature flags, netdev/ethtool ops and
 * the per-RX-queue plus TX/MCC NAPI contexts.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything starts enabled; VLAN rx/filter are always-on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per RX queue, plus one for TX/MCC events */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3049
3050static void be_unmap_pci_bars(struct be_adapter *adapter)
3051{
8788fdc2
SP
3052 if (adapter->csr)
3053 iounmap(adapter->csr);
3054 if (adapter->db)
3055 iounmap(adapter->db);
6b7c5b94
SP
3056}
3057
3058static int be_map_pci_bars(struct be_adapter *adapter)
3059{
3060 u8 __iomem *addr;
db3ea781 3061 int db_reg;
6b7c5b94 3062
fe6d2a38
SP
3063 if (lancer_chip(adapter)) {
3064 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3065 pci_resource_len(adapter->pdev, 0));
3066 if (addr == NULL)
3067 return -ENOMEM;
3068 adapter->db = addr;
3069 return 0;
3070 }
3071
ba343c77
SB
3072 if (be_physfn(adapter)) {
3073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3074 pci_resource_len(adapter->pdev, 2));
3075 if (addr == NULL)
3076 return -ENOMEM;
3077 adapter->csr = addr;
3078 }
6b7c5b94 3079
ba343c77 3080 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3081 db_reg = 4;
3082 } else {
ba343c77
SB
3083 if (be_physfn(adapter))
3084 db_reg = 4;
3085 else
3086 db_reg = 0;
3087 }
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3089 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3090 if (addr == NULL)
3091 goto pci_map_err;
ba343c77
SB
3092 adapter->db = addr;
3093
6b7c5b94
SP
3094 return 0;
3095pci_map_err:
3096 be_unmap_pci_bars(adapter);
3097 return -ENOMEM;
3098}
3099
3100
3101static void be_ctrl_cleanup(struct be_adapter *adapter)
3102{
8788fdc2 3103 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3104
3105 be_unmap_pci_bars(adapter);
3106
3107 if (mem->va)
2b7bcebf
IV
3108 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3109 mem->dma);
e7b909a6 3110
5b8821b7 3111 mem = &adapter->rx_filter;
e7b909a6 3112 if (mem->va)
2b7bcebf
IV
3113 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3114 mem->dma);
6b7c5b94
SP
3115}
3116
6b7c5b94
SP
/* Map BARs, allocate the fw mailbox and RX-filter DMA buffers and init
 * the locks serializing mbox/MCC access. Mirrored by be_ctrl_cleanup();
 * on failure everything acquired so far is released via goto unwind.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be 16-byte aligned as the
	 * hw requires; keep the original va/dma in mbox_mem_alloc for freeing
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is restored during EEH / suspend recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3169
3170static void be_stats_cleanup(struct be_adapter *adapter)
3171{
3abcdeda 3172 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3173
3174 if (cmd->va)
2b7bcebf
IV
3175 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3176 cmd->va, cmd->dma);
6b7c5b94
SP
3177}
3178
3179static int be_stats_init(struct be_adapter *adapter)
3180{
3abcdeda 3181 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3182
005d5696 3183 if (adapter->generation == BE_GEN2) {
89a88ab8 3184 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3185 } else {
3186 if (lancer_chip(adapter))
3187 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3188 else
3189 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3190 }
2b7bcebf
IV
3191 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3192 GFP_KERNEL);
6b7c5b94
SP
3193 if (cmd->va == NULL)
3194 return -1;
d291b9af 3195 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3196 return 0;
3197}
3198
/* PCI remove: tear down in the reverse order of be_probe(). */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the worker before dismantling the objects it touches */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3226
2243e2e9 3227static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3228{
6b7c5b94
SP
3229 int status;
3230
3abcdeda
SP
3231 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3232 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3233 if (status)
3234 return status;
3235
752961a1 3236 if (adapter->function_mode & FLEX10_MODE)
82903e4b
AK
3237 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3238 else
3239 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3240
9e1453c5
AK
3241 status = be_cmd_get_cntl_attributes(adapter);
3242 if (status)
3243 return status;
3244
2243e2e9 3245 return 0;
6b7c5b94
SP
3246}
3247
fe6d2a38
SP
/* Derive the adapter generation from the PCI device id; for SLI-4 ids
 * also validate the SLI_INTF register and record the SLI family.
 * Returns 0, or -EINVAL when SLI_INTF reports an unusable interface.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* only interface type 2 of a valid SLI_INTF is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3282
37eed1cb
PR
3283static int lancer_wait_ready(struct be_adapter *adapter)
3284{
d8110f62 3285#define SLIPORT_READY_TIMEOUT 30
37eed1cb
PR
3286 u32 sliport_status;
3287 int status = 0, i;
3288
3289 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3290 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3291 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3292 break;
3293
d8110f62 3294 msleep(1000);
37eed1cb
PR
3295 }
3296
3297 if (i == SLIPORT_READY_TIMEOUT)
3298 status = -1;
3299
3300 return status;
3301}
3302
/* Wait for the Lancer SLIPORT to become ready. If it instead reports a
 * recoverable error (ERR and RN both set), trigger an "initiate port"
 * reset and wait again. Returns 0 when the port is usable, -1 when the
 * error persists or is flagged unrecoverable.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* ERR without RN (or vice versa) is not recoverable */
			status = -1;
		}
	}
	return status;
}
3330
d8110f62
PR
/* Worker-context check for Lancer fn-level errors: if SLIPORT reports
 * an error, reset the port and rebuild the function (close, be_clear,
 * be_setup, reopen). Skipped while EEH or UE handling is in progress.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear any stale fw-timeout state before re-setup */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3379
/* Periodic (1s) housekeeping: Lancer error recovery, UE detection,
 * async stats refresh, RX EQ-delay tuning and replenishing starved
 * RX rings. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* fire a new async stats query only if none is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3428
6b7c5b94
SP
/* PCI probe: bring the adapter from power-on to a registered netdev.
 * Order matters: BARs/mailbox (be_ctrl_init) must exist before any fw
 * cmd is issued; stats/config buffers before be_setup(); the worker is
 * scheduled only after register_netdev() succeeds. The error labels
 * unwind in exact reverse order of acquisition.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer must reach SLIPORT-ready (resetting if needed) before
	 * any mailbox traffic
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3563
/* PM suspend: stop the worker, arm WoL if requested, quiesce and tear
 * down the function, then drop the device into the target power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3587
/* PM resume: re-enable the device, re-arm fw cmds, rebuild the function
 * and reopen the interface if it was running before suspend.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* WoL was armed in be_suspend(); disarm it now */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3623
82456b03
SP
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset so no DMA continues past shutdown */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3645
cf588477
SP
/* EEH callback: a PCI channel error was detected. Quiesce the function
 * and request a slot reset, or disconnect on permanent failure.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag blocks further hw access until recovery completes */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3672
/* EEH callback: the slot reset completed. Re-enable the device, restore
 * config space and confirm the card's fw is ready via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear all error flags so normal operation can resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3698
/* EEH callback: traffic may flow again. Rebuild the function and reopen
 * the interface if it was up before the error.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3728
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3734
6b7c5b94
SP
/* PCI driver glue: probe/remove, PM hooks and EEH error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3745
3746static int __init be_init_module(void)
3747{
8e95a202
JP
3748 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3749 rx_frag_size != 2048) {
6b7c5b94
SP
3750 printk(KERN_WARNING DRV_NAME
3751 " : Module param rx_frag_size must be 2048/4096/8192."
3752 " Using 2048\n");
3753 rx_frag_size = 2048;
3754 }
6b7c5b94
SP
3755
3756 return pci_register_driver(&be_driver);
3757}
3758module_init(be_init_module);
3759
/* Module exit: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);