drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

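/* Toggle the host-interrupt enable bit in the membar control register
 * via PCI config space; the register is written back only when the
 * requested state differs from the current one.
 */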
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

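/* The *_notify() helpers below all ring a doorbell: the queue id is
 * packed into the low bits of a 32-bit word, per-doorbell flags (rearm,
 * clear-interrupt, event) and the count of entries posted or popped are
 * OR'd in, and the word is written to the doorbell BAR at a fixed
 * per-queue-type offset. The wmb() in the RQ/TXQ variants orders the
 * descriptor writes ahead of the doorbell write.
 */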
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

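/* The new MAC is programmed as an additional pmac entry first and the
 * old pmac-id is deleted only after the add succeeds, so the interface
 * is never left without a valid MAC filter.
 */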
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

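/* Fold a free-running 16-bit HW counter into a 32-bit accumulator: the
 * low 16 bits track the latest HW value and the high 16 bits count
 * wrap-arounds, so a new reading smaller than the previous one means
 * the HW counter wrapped and another 65536 is credited.
 */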
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

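/* If the vlan priority requested by the stack is not enabled in the
 * adapter's priority bitmap, rewrite the PCP bits to the firmware
 * recommended priority while preserving the vlan id.
 */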
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

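/* Map the skb head and each page fragment for DMA and fill one WRB per
 * mapping, preceded by a header WRB. On a mapping failure the queue
 * head is rewound and every WRB filled so far is unmapped, leaving the
 * txq exactly as it was found.
 */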
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);

	return 0;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);

	return 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || rate < 0)
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

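/* Adaptive interrupt coalescing: once a second, derive a pkts/sec rate
 * from the rx counters and scale it into an EQ delay, clamped to the
 * [min_eqd, max_eqd] window; the new value is programmed in the device
 * only when it differs from the current setting.
 */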
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

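/* Rx buffers are rx_frag_size slices of a larger page; each slice holds
 * its own page reference (via get_page()), and the page is DMA-unmapped
 * only when its last slice (last_page_user) is consumed.
 */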
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

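/* The hardware writes v0- or v1-format rx completions depending on
 * whether BE3 native mode was negotiated; the two parsers below extract
 * the same fields into the chip-independent be_rx_compl_info.
 */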
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* the rss hash must be read from the HW completion, not rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* the rss hash must be read from the HW completion, not rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

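/* A tx completion reports only the index of the last WRB of a request;
 * walk the txq from its tail up to that index, unmapping the frags, and
 * return the number of WRBs consumed so the caller can credit txq->used.
 */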
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

3c8def97
SP
1562static void be_tx_compl_clean(struct be_adapter *adapter,
1563 struct be_tx_obj *txo)
6b7c5b94 1564{
3c8def97
SP
1565 struct be_queue_info *tx_cq = &txo->cq;
1566 struct be_queue_info *txq = &txo->q;
a8e9179a 1567 struct be_eth_tx_compl *txcp;
4d586b82 1568 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1569 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1570 struct sk_buff *sent_skb;
1571 bool dummy_wrb;
a8e9179a
SP
1572
1573 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1574 do {
1575 while ((txcp = be_tx_compl_get(tx_cq))) {
1576 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1577 wrb_index, txcp);
3c8def97 1578 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1579 cmpl++;
1580 }
1581 if (cmpl) {
1582 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1583 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1584 cmpl = 0;
4d586b82 1585 num_wrbs = 0;
a8e9179a
SP
1586 }
1587
1588 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1589 break;
1590
1591 mdelay(1);
1592 } while (true);
1593
1594 if (atomic_read(&txq->used))
1595 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1596 atomic_read(&txq->used));
1597
1598 /* free posted tx for which compls will never arrive */
1599 while (atomic_read(&txq->used)) {
1600 sent_skb = sent_skbs[txq->tail];
1601 end_idx = txq->tail;
1602 index_adv(&end_idx,
1603 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1604 txq->len);
3c8def97 1605 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1606 atomic_sub(num_wrbs, &txq->used);
b03388d6 1607 }
1608}
1609
1610static void be_mcc_queues_destroy(struct be_adapter *adapter)
1611{
1612 struct be_queue_info *q;
5fb379ee 1613
8788fdc2 1614 q = &adapter->mcc_obj.q;
5fb379ee 1615 if (q->created)
8788fdc2 1616 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1617 be_queue_free(adapter, q);
1618
8788fdc2 1619 q = &adapter->mcc_obj.cq;
5fb379ee 1620 if (q->created)
8788fdc2 1621 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1622 be_queue_free(adapter, q);
1623}
1624
1625/* Must be called only after TX qs are created as MCC shares TX EQ */
1626static int be_mcc_queues_create(struct be_adapter *adapter)
1627{
1628 struct be_queue_info *q, *cq;
1629
1630 /* Alloc MCC compl queue */
8788fdc2 1631 cq = &adapter->mcc_obj.cq;
5fb379ee 1632 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1633 sizeof(struct be_mcc_compl)))
1634 goto err;
1635
1636 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1637 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1638 goto mcc_cq_free;
1639
1640 /* Alloc MCC queue */
8788fdc2 1641 q = &adapter->mcc_obj.q;
1642 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1643 goto mcc_cq_destroy;
1644
1645 /* Ask BE to create MCC queue */
8788fdc2 1646 if (be_cmd_mccq_create(adapter, q, cq))
1647 goto mcc_q_free;
1648
1649 return 0;
1650
1651mcc_q_free:
1652 be_queue_free(adapter, q);
1653mcc_cq_destroy:
8788fdc2 1654 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1655mcc_cq_free:
1656 be_queue_free(adapter, cq);
1657err:
1658 return -1;
1659}
1660
1661static void be_tx_queues_destroy(struct be_adapter *adapter)
1662{
1663 struct be_queue_info *q;
1664 struct be_tx_obj *txo;
1665 u8 i;
6b7c5b94 1666
1667 for_all_tx_queues(adapter, txo, i) {
1668 q = &txo->q;
1669 if (q->created)
1670 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1671 be_queue_free(adapter, q);
6b7c5b94 1672
1673 q = &txo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677 }
6b7c5b94 1678
1679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
1682 q = &adapter->tx_eq.q;
1683 if (q->created)
8788fdc2 1684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1685 be_queue_free(adapter, q);
1686}
1687
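/* Multiple TX queues are requested only for BE3-class physical
 * functions outside SR-IOV/multi-channel operation; SR-IOV,
 * multi-channel, Lancer, VFs and BE2 are all limited to one queue
 * by the check below.
 */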
1688static int be_num_txqs_want(struct be_adapter *adapter)
1689{
11ac75ed 1690 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1691 lancer_chip(adapter) || !be_physfn(adapter) ||
1692 adapter->generation == BE_GEN2)
1693 return 1;
1694 else
1695 return MAX_TX_QS;
1696}
1697
3c8def97 1698/* One TX event queue is shared by all TX compl qs */
1699static int be_tx_queues_create(struct be_adapter *adapter)
1700{
1701 struct be_queue_info *eq, *q, *cq;
1702 struct be_tx_obj *txo;
1703 u8 i;
6b7c5b94 1704
dafc0fe3 1705 adapter->num_tx_qs = be_num_txqs_want(adapter);
1706 if (adapter->num_tx_qs != MAX_TX_QS) {
1707 rtnl_lock();
1708 netif_set_real_num_tx_queues(adapter->netdev,
1709 adapter->num_tx_qs);
1710 rtnl_unlock();
1711 }
dafc0fe3 1712
1713 adapter->tx_eq.max_eqd = 0;
1714 adapter->tx_eq.min_eqd = 0;
1715 adapter->tx_eq.cur_eqd = 96;
1716 adapter->tx_eq.enable_aic = false;
3c8def97 1717
6b7c5b94 1718 eq = &adapter->tx_eq.q;
1719 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1720 sizeof(struct be_eq_entry)))
1721 return -1;
1722
8788fdc2 1723 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1724 goto err;
ecd62107 1725 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1726
1727 for_all_tx_queues(adapter, txo, i) {
1728 cq = &txo->cq;
1729 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1730 sizeof(struct be_eth_tx_compl)))
3c8def97 1731 goto err;
6b7c5b94 1732
1733 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1734 goto err;
6b7c5b94 1735
1736 q = &txo->q;
1737 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1738 sizeof(struct be_eth_wrb)))
1739 goto err;
3c8def97 1740 }
1741 return 0;
1742
1743err:
1744 be_tx_queues_destroy(adapter);
1745 return -1;
1746}
1747
1748static void be_rx_queues_destroy(struct be_adapter *adapter)
1749{
1750 struct be_queue_info *q;
1751 struct be_rx_obj *rxo;
1752 int i;
1753
1754 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1755 be_queue_free(adapter, &rxo->q);
1756
1757 q = &rxo->cq;
1758 if (q->created)
1759 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1760 be_queue_free(adapter, q);
1761
3abcdeda 1762 q = &rxo->rx_eq.q;
482c9e79 1763 if (q->created)
3abcdeda 1764 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1765 be_queue_free(adapter, q);
6b7c5b94 1766 }
1767}
1768
1769static u32 be_num_rxqs_want(struct be_adapter *adapter)
1770{
c814fd36 1771 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1772 !sriov_enabled(adapter) && be_physfn(adapter) &&
1773 !be_is_mc(adapter)) {
1774 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1775 } else {
1776 dev_warn(&adapter->pdev->dev,
1777 "No support for multiple RX queues\n");
1778 return 1;
1779 }
1780}
1781
1782static int be_rx_queues_create(struct be_adapter *adapter)
1783{
1784 struct be_queue_info *eq, *q, *cq;
1785 struct be_rx_obj *rxo;
1786 int rc, i;
6b7c5b94 1787
1788 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1789 msix_enabled(adapter) ?
1790 adapter->num_msix_vec - 1 : 1);
1791 if (adapter->num_rx_qs != MAX_RX_QS)
1792 dev_warn(&adapter->pdev->dev,
1793 "Can create only %d RX queues", adapter->num_rx_qs);
1794
6b7c5b94 1795 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1796 for_all_rx_queues(adapter, rxo, i) {
1797 rxo->adapter = adapter;
1798 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1799 rxo->rx_eq.enable_aic = true;
1800
1801 /* EQ */
1802 eq = &rxo->rx_eq.q;
1803 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1804 sizeof(struct be_eq_entry));
1805 if (rc)
1806 goto err;
1807
1808 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1809 if (rc)
1810 goto err;
1811
ecd62107 1812 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1813
1814 /* CQ */
1815 cq = &rxo->cq;
1816 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1817 sizeof(struct be_eth_rx_compl));
1818 if (rc)
1819 goto err;
1820
1821 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1822 if (rc)
1823 goto err;
1824
1825 /* Rx Q - will be created in be_open() */
1826 q = &rxo->q;
1827 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1828 sizeof(struct be_eth_rx_d));
1829 if (rc)
1830 goto err;
1831
3abcdeda 1832 }
1833
1834 return 0;
1835err:
1836 be_rx_queues_destroy(adapter);
1837 return -1;
6b7c5b94 1838}
6b7c5b94 1839
fe6d2a38 1840static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1841{
1842 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1843 if (!eqe->evt)
1844 return false;
1845 else
1846 return true;
1847}
1848
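/* Legacy INTx handler. On Lancer the EQs appear to be peeked directly
 * rather than consulting CEV_ISR; on BE2/BE3 the ISR register read
 * below indicates which EQs fired. Returning IRQ_NONE when nothing is
 * pending lets the kernel cope with a shared interrupt line.
 */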
1849static irqreturn_t be_intx(int irq, void *dev)
1850{
1851 struct be_adapter *adapter = dev;
3abcdeda 1852 struct be_rx_obj *rxo;
fe6d2a38 1853 int isr, i, tx = 0, rx = 0;
6b7c5b94 1854
1855 if (lancer_chip(adapter)) {
1856 if (event_peek(&adapter->tx_eq))
3c8def97 1857 tx = event_handle(adapter, &adapter->tx_eq, false);
1858 for_all_rx_queues(adapter, rxo, i) {
1859 if (event_peek(&rxo->rx_eq))
3c8def97 1860 rx |= event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1861 }
6b7c5b94 1862
1863 if (!(tx || rx))
1864 return IRQ_NONE;
3abcdeda 1865
1866 } else {
1867 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1868 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1869 if (!isr)
1870 return IRQ_NONE;
1871
ecd62107 1872 if ((1 << adapter->tx_eq.eq_idx & isr))
3c8def97 1873 event_handle(adapter, &adapter->tx_eq, false);
1874
1875 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1876 if ((1 << rxo->rx_eq.eq_idx & isr))
3c8def97 1877 event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1878 }
3abcdeda 1879 }
c001c213 1880
8788fdc2 1881 return IRQ_HANDLED;
1882}
1883
1884static irqreturn_t be_msix_rx(int irq, void *dev)
1885{
1886 struct be_rx_obj *rxo = dev;
1887 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1888
3c8def97 1889 event_handle(adapter, &rxo->rx_eq, true);
1890
1891 return IRQ_HANDLED;
1892}
1893
5fb379ee 1894static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1895{
1896 struct be_adapter *adapter = dev;
1897
3c8def97 1898 event_handle(adapter, &adapter->tx_eq, false);
1899
1900 return IRQ_HANDLED;
1901}
1902
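/* GRO is attempted only for error-free TCP completions; all other
 * frames take the regular be_rx_compl_process() path.
 */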
2e588f84 1903static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1904{
2e588f84 1905 return rxcp->tcpf && !rxcp->err;
1906}
1907
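/* NAPI RX poll: consume up to 'budget' completions, refill the ring
 * once its fill level drops below RX_FRAGS_REFILL_WM, and re-arm the
 * CQ only when the queue is fully drained (work_done < budget).
 */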
49b05221 1908static int be_poll_rx(struct napi_struct *napi, int budget)
1909{
1910 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1911 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1912 struct be_adapter *adapter = rxo->adapter;
1913 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1914 struct be_rx_compl_info *rxcp;
1915 u32 work_done;
1916
ac124ff9 1917 rx_stats(rxo)->rx_polls++;
6b7c5b94 1918 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1919 rxcp = be_rx_compl_get(rxo);
1920 if (!rxcp)
1921 break;
1922
1923 /* Is it a flush compl that has no data */
1924 if (unlikely(rxcp->num_rcvd == 0))
1925 goto loop_continue;
1926
1927 /* Discard compl with partial DMA Lancer B0 */
1928 if (unlikely(!rxcp->pkt_size)) {
1929 be_rx_compl_discard(adapter, rxo, rxcp);
1930 goto loop_continue;
1931 }
1932
1933 /* On BE drop pkts that arrive due to imperfect filtering in
 1934 * promiscuous mode on some SKUs
1935 */
1936 if (unlikely(rxcp->port != adapter->port_num &&
1937 !lancer_chip(adapter))) {
009dd872 1938 be_rx_compl_discard(adapter, rxo, rxcp);
12004ae9 1939 goto loop_continue;
64642811 1940 }
009dd872 1941
1942 if (do_gro(rxcp))
1943 be_rx_compl_process_gro(adapter, rxo, rxcp);
1944 else
1945 be_rx_compl_process(adapter, rxo, rxcp);
1946loop_continue:
2e588f84 1947 be_rx_stats_update(rxo, rxcp);
1948 }
1949
1950 be_cq_notify(adapter, rx_cq->id, false, work_done);
1951
6b7c5b94 1952 /* Refill the queue */
857c9905 1953 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1954 be_post_rx_frags(rxo, GFP_ATOMIC);
1955
1956 /* All consumed */
1957 if (work_done < budget) {
1958 napi_complete(napi);
1959 /* Arm CQ */
1960 be_cq_notify(adapter, rx_cq->id, true, 0);
1961 }
1962 return work_done;
1963}
1964
1965/* As TX and MCC share the same EQ check for both TX and MCC completions.
1966 * For TX/MCC we don't honour budget; consume everything
1967 */
1968static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1969{
1970 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1971 struct be_adapter *adapter =
1972 container_of(tx_eq, struct be_adapter, tx_eq);
93c86700 1973 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3c8def97 1974 struct be_tx_obj *txo;
6b7c5b94 1975 struct be_eth_tx_compl *txcp;
1976 int tx_compl, mcc_compl, status = 0;
1977 u8 i;
1978 u16 num_wrbs;
1979
1980 for_all_tx_queues(adapter, txo, i) {
1981 tx_compl = 0;
1982 num_wrbs = 0;
1983 while ((txcp = be_tx_compl_get(&txo->cq))) {
1984 num_wrbs += be_tx_compl_process(adapter, txo,
1985 AMAP_GET_BITS(struct amap_eth_tx_compl,
1986 wrb_index, txcp));
1987 tx_compl++;
1988 }
1989 if (tx_compl) {
1990 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1991
1992 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1993
1994 /* As Tx wrbs have been freed up, wake up netdev queue
1995 * if it was stopped due to lack of tx wrbs. */
1996 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1997 atomic_read(&txo->q.used) < txo->q.len / 2) {
1998 netif_wake_subqueue(adapter->netdev, i);
1999 }
2000
ab1594e9 2001 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 2002 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 2003 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 2004 }
2005 }
2006
2007 mcc_compl = be_process_mcc(adapter, &status);
2008
f31e50a8 2009 if (mcc_compl) {
2010 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2011 }
2012
3c8def97 2013 napi_complete(napi);
6b7c5b94 2014
2015 /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
2016 if (lancer_chip(adapter) && !msix_enabled(adapter)) {
2017 for_all_tx_queues(adapter, txo, i)
2018 be_cq_notify(adapter, txo->cq.id, true, 0);
2019
2020 be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
2021 }
2022
3c8def97 2023 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 2024 adapter->drv_stats.tx_events++;
2025 return 1;
2026}
2027
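/* Unrecoverable-error (UE) detection. Lancer reports errors through
 * the SLIPORT status/error registers; BE2/BE3 expose masked UE status
 * words in PCI config space, decoded below via the
 * ue_status_low_desc[]/ue_status_hi_desc[] tables.
 */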
d053de91 2028void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 2029{
2030 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2031 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2032 u32 i;
2033
2034 if (adapter->eeh_err || adapter->ue_detected)
2035 return;
2036
2037 if (lancer_chip(adapter)) {
2038 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2039 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2040 sliport_err1 = ioread32(adapter->db +
2041 SLIPORT_ERROR1_OFFSET);
2042 sliport_err2 = ioread32(adapter->db +
2043 SLIPORT_ERROR2_OFFSET);
2044 }
2045 } else {
2046 pci_read_config_dword(adapter->pdev,
2047 PCICFG_UE_STATUS_LOW, &ue_lo);
2048 pci_read_config_dword(adapter->pdev,
2049 PCICFG_UE_STATUS_HIGH, &ue_hi);
2050 pci_read_config_dword(adapter->pdev,
2051 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2052 pci_read_config_dword(adapter->pdev,
2053 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2054
2055 ue_lo = (ue_lo & (~ue_lo_mask));
2056 ue_hi = (ue_hi & (~ue_hi_mask));
2057 }
7c185276 2058
2059 if (ue_lo || ue_hi ||
2060 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2061 adapter->ue_detected = true;
7acc2087 2062 adapter->eeh_err = true;
2063 dev_err(&adapter->pdev->dev,
2064 "Unrecoverable error in the card\n");
2065 }
2066
2067 if (ue_lo) {
2068 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2069 if (ue_lo & 1)
2070 dev_err(&adapter->pdev->dev,
2071 "UE: %s bit set\n", ue_status_low_desc[i]);
2072 }
2073 }
2074 if (ue_hi) {
2075 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2076 if (ue_hi & 1)
2077 dev_err(&adapter->pdev->dev,
2078 "UE: %s bit set\n", ue_status_hi_desc[i]);
2079 }
2080 }
2081
2082 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2083 dev_err(&adapter->pdev->dev,
2084 "sliport status 0x%x\n", sliport_status);
2085 dev_err(&adapter->pdev->dev,
2086 "sliport error1 0x%x\n", sliport_err1);
2087 dev_err(&adapter->pdev->dev,
2088 "sliport error2 0x%x\n", sliport_err2);
2089 }
2090}
2091
2092static void be_msix_disable(struct be_adapter *adapter)
2093{
ac6a0c4a 2094 if (msix_enabled(adapter)) {
8d56ff11 2095 pci_disable_msix(adapter->pdev);
ac6a0c4a 2096 adapter->num_msix_vec = 0;
2097 }
2098}
2099
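/* MSI-X setup: request one vector per desired RX queue plus one
 * shared TX/MCC vector. pci_enable_msix() returns the number of
 * vectors actually available when the request cannot be met in full,
 * so a second, smaller request is tried before falling back to INTx.
 */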
2100static void be_msix_enable(struct be_adapter *adapter)
2101{
3abcdeda 2102#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2103 int i, status, num_vec;
6b7c5b94 2104
ac6a0c4a 2105 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2106
ac6a0c4a 2107 for (i = 0; i < num_vec; i++)
2108 adapter->msix_entries[i].entry = i;
2109
ac6a0c4a 2110 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2111 if (status == 0) {
2112 goto done;
2113 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2114 num_vec = status;
3abcdeda 2115 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2116 num_vec) == 0)
3abcdeda 2117 goto done;
2118 }
2119 return;
2120done:
2121 adapter->num_msix_vec = num_vec;
2122 return;
2123}
2124
f9449ab7 2125static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2126{
344dbf10 2127 be_check_sriov_fn_type(adapter);
11ac75ed 2128
6dedec81 2129#ifdef CONFIG_PCI_IOV
ba343c77 2130 if (be_physfn(adapter) && num_vfs) {
81be8f0a 2131 int status, pos;
11ac75ed 2132 u16 dev_vfs;
2133
2134 pos = pci_find_ext_capability(adapter->pdev,
2135 PCI_EXT_CAP_ID_SRIOV);
2136 pci_read_config_word(adapter->pdev,
11ac75ed 2137 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
81be8f0a 2138
2139 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2140 if (adapter->num_vfs != num_vfs)
81be8f0a 2141 dev_info(&adapter->pdev->dev,
2142 "Device supports %d VFs and not %d\n",
2143 adapter->num_vfs, num_vfs);
6dedec81 2144
2145 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2146 if (status)
2147 adapter->num_vfs = 0;
f9449ab7 2148
11ac75ed 2149 if (adapter->num_vfs) {
2150 adapter->vf_cfg = kcalloc(num_vfs,
2151 sizeof(struct be_vf_cfg),
2152 GFP_KERNEL);
2153 if (!adapter->vf_cfg)
2154 return -ENOMEM;
2155 }
2156 }
2157#endif
f9449ab7 2158 return 0;
2159}
2160
2161static void be_sriov_disable(struct be_adapter *adapter)
2162{
2163#ifdef CONFIG_PCI_IOV
11ac75ed 2164 if (sriov_enabled(adapter)) {
ba343c77 2165 pci_disable_sriov(adapter->pdev);
f9449ab7 2166 kfree(adapter->vf_cfg);
11ac75ed 2167 adapter->num_vfs = 0;
2168 }
2169#endif
2170}
2171
2172static inline int be_msix_vec_get(struct be_adapter *adapter,
2173 struct be_eq_obj *eq_obj)
6b7c5b94 2174{
ecd62107 2175 return adapter->msix_entries[eq_obj->eq_idx].vector;
2176}
2177
2178static int be_request_irq(struct be_adapter *adapter,
2179 struct be_eq_obj *eq_obj,
3abcdeda 2180 void *handler, char *desc, void *context)
2181{
2182 struct net_device *netdev = adapter->netdev;
2183 int vec;
2184
2185 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2186 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2187 return request_irq(vec, handler, 0, eq_obj->desc, context);
2188}
2189
2190static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2191 void *context)
b628bde2 2192{
fe6d2a38 2193 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2194 free_irq(vec, context);
b628bde2 2195}
6b7c5b94 2196
2197static int be_msix_register(struct be_adapter *adapter)
2198{
2199 struct be_rx_obj *rxo;
2200 int status, i;
2201 char qname[10];
b628bde2 2202
2203 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2204 adapter);
2205 if (status)
2206 goto err;
2207
2208 for_all_rx_queues(adapter, rxo, i) {
2209 sprintf(qname, "rxq%d", i);
2210 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2211 qname, rxo);
2212 if (status)
2213 goto err_msix;
2214 }
b628bde2 2215
6b7c5b94 2216 return 0;
b628bde2 2217
2218err_msix:
2219 be_free_irq(adapter, &adapter->tx_eq, adapter);
2220
2221 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2222 be_free_irq(adapter, &rxo->rx_eq, rxo);
2223
2224err:
2225 dev_warn(&adapter->pdev->dev,
2226 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2227 be_msix_disable(adapter);
2228 return status;
2229}
2230
2231static int be_irq_register(struct be_adapter *adapter)
2232{
2233 struct net_device *netdev = adapter->netdev;
2234 int status;
2235
ac6a0c4a 2236 if (msix_enabled(adapter)) {
2237 status = be_msix_register(adapter);
2238 if (status == 0)
2239 goto done;
2240 /* INTx is not supported for VF */
2241 if (!be_physfn(adapter))
2242 return status;
2243 }
2244
2245 /* INTx */
2246 netdev->irq = adapter->pdev->irq;
2247 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2248 adapter);
2249 if (status) {
2250 dev_err(&adapter->pdev->dev,
2251 "INTx request IRQ failed - err %d\n", status);
2252 return status;
2253 }
2254done:
2255 adapter->isr_registered = true;
2256 return 0;
2257}
2258
2259static void be_irq_unregister(struct be_adapter *adapter)
2260{
2261 struct net_device *netdev = adapter->netdev;
2262 struct be_rx_obj *rxo;
2263 int i;
2264
2265 if (!adapter->isr_registered)
2266 return;
2267
2268 /* INTx */
ac6a0c4a 2269 if (!msix_enabled(adapter)) {
2270 free_irq(netdev->irq, adapter);
2271 goto done;
2272 }
2273
2274 /* MSIx */
2275 be_free_irq(adapter, &adapter->tx_eq, adapter);
2276
2277 for_all_rx_queues(adapter, rxo, i)
2278 be_free_irq(adapter, &rxo->rx_eq, rxo);
2279
2280done:
2281 adapter->isr_registered = false;
2282}
2283
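/* Destroys the RX rings, allowing a ~1ms grace period for in-flight
 * DMA and the flush completion before the leftover buffers and any
 * residual EQ entries are reaped.
 */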
2284static void be_rx_queues_clear(struct be_adapter *adapter)
2285{
2286 struct be_queue_info *q;
2287 struct be_rx_obj *rxo;
2288 int i;
2289
2290 for_all_rx_queues(adapter, rxo, i) {
2291 q = &rxo->q;
2292 if (q->created) {
2293 be_cmd_rxq_destroy(adapter, q);
2294 /* After the rxq is invalidated, wait for a grace time
2295 * of 1ms for all dma to end and the flush compl to
2296 * arrive
2297 */
2298 mdelay(1);
2299 be_rx_q_clean(adapter, rxo);
2300 }
2301
2302 /* Clear any residual events */
2303 q = &rxo->rx_eq.q;
2304 if (q->created)
2305 be_eq_clean(adapter, &rxo->rx_eq);
2306 }
2307}
2308
2309static int be_close(struct net_device *netdev)
2310{
2311 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2312 struct be_rx_obj *rxo;
3c8def97 2313 struct be_tx_obj *txo;
889cd4b2 2314 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2315 int vec, i;
889cd4b2 2316
2317 be_async_mcc_disable(adapter);
2318
2319 if (!lancer_chip(adapter))
2320 be_intr_set(adapter, false);
889cd4b2 2321
2322 for_all_rx_queues(adapter, rxo, i)
2323 napi_disable(&rxo->rx_eq.napi);
2324
2325 napi_disable(&tx_eq->napi);
2326
2327 if (lancer_chip(adapter)) {
2328 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2329 for_all_rx_queues(adapter, rxo, i)
2330 be_cq_notify(adapter, rxo->cq.id, false, 0);
2331 for_all_tx_queues(adapter, txo, i)
2332 be_cq_notify(adapter, txo->cq.id, false, 0);
2333 }
2334
ac6a0c4a 2335 if (msix_enabled(adapter)) {
fe6d2a38 2336 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2337 synchronize_irq(vec);
2338
2339 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2340 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2341 synchronize_irq(vec);
2342 }
2343 } else {
2344 synchronize_irq(netdev->irq);
2345 }
2346 be_irq_unregister(adapter);
2347
2348 /* Wait for all pending tx completions to arrive so that
2349 * all tx skbs are freed.
2350 */
2351 for_all_tx_queues(adapter, txo, i)
2352 be_tx_compl_clean(adapter, txo);
889cd4b2 2353
2354 be_rx_queues_clear(adapter);
2355 return 0;
2356}
2357
2358static int be_rx_queues_setup(struct be_adapter *adapter)
2359{
2360 struct be_rx_obj *rxo;
2361 int rc, i, j;
2362 u8 rsstable[128];
2363
2364 for_all_rx_queues(adapter, rxo, i) {
2365 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2366 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2367 adapter->if_handle,
2368 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2369 if (rc)
2370 return rc;
2371 }
2372
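 /* RSS indirection setup (a sketch of the loop below): the 128-entry
  * table is filled round-robin with the rss_id of each RSS-capable
  * ring (the default queue is excluded) and then pushed to firmware
  * through be_cmd_rss_config().
  */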
2373 if (be_multi_rxq(adapter)) {
2374 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2375 for_all_rss_queues(adapter, rxo, i) {
2376 if ((j + i) >= 128)
2377 break;
2378 rsstable[j + i] = rxo->rss_id;
2379 }
2380 }
2381 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79 2382
2383 if (rc)
2384 return rc;
2385 }
2386
2387 /* First time posting */
2388 for_all_rx_queues(adapter, rxo, i) {
2389 be_post_rx_frags(rxo, GFP_KERNEL);
2390 napi_enable(&rxo->rx_eq.napi);
2391 }
2392 return 0;
2393}
2394
2395static int be_open(struct net_device *netdev)
2396{
2397 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2398 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2399 struct be_rx_obj *rxo;
3abcdeda 2400 int status, i;
5fb379ee 2401
2402 status = be_rx_queues_setup(adapter);
2403 if (status)
2404 goto err;
2405
2406 napi_enable(&tx_eq->napi);
2407
2408 be_irq_register(adapter);
2409
2410 if (!lancer_chip(adapter))
2411 be_intr_set(adapter, true);
2412
2413 /* The evt queues are created in unarmed state; arm them */
2414 for_all_rx_queues(adapter, rxo, i) {
2415 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2416 be_cq_notify(adapter, rxo->cq.id, true, 0);
2417 }
8788fdc2 2418 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2419
2420 /* Now that interrupts are on we can process async mcc */
2421 be_async_mcc_enable(adapter);
2422
2423 return 0;
2424err:
2425 be_close(adapter->netdev);
2426 return -EIO;
2427}
2428
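/* Programs or clears the magic-packet WoL filter. The MAC to match is
 * passed to firmware in a DMA-coherent buffer; an all-zero MAC
 * disables the filter, and the PCI wake state is toggled to match.
 */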
2429static int be_setup_wol(struct be_adapter *adapter, bool enable)
2430{
2431 struct be_dma_mem cmd;
2432 int status = 0;
2433 u8 mac[ETH_ALEN];
2434
2435 memset(mac, 0, ETH_ALEN);
2436
2437 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2438 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2439 GFP_KERNEL);
2440 if (cmd.va == NULL)
2441 return -1;
2442 memset(cmd.va, 0, cmd.size);
2443
2444 if (enable) {
2445 status = pci_write_config_dword(adapter->pdev,
2446 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2447 if (status) {
2448 dev_err(&adapter->pdev->dev,
2381a55c 2449 "Could not enable Wake-on-LAN\n");
2450 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2451 cmd.dma);
2452 return status;
2453 }
2454 status = be_cmd_enable_magic_wol(adapter,
2455 adapter->netdev->dev_addr, &cmd);
2456 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2457 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2458 } else {
2459 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2460 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2461 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2462 }
2463
2b7bcebf 2464 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2465 return status;
2466}
2467
2468/*
2469 * Generate a seed MAC address from the PF MAC Address using jhash.
 2470 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2471 * These addresses are programmed in the ASIC by the PF and the VF driver
2472 * queries for the MAC address during its probe.
2473 */
2474static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2475{
f9449ab7 2476 u32 vf;
3abcdeda 2477 int status = 0;
6d87f5c3 2478 u8 mac[ETH_ALEN];
11ac75ed 2479 struct be_vf_cfg *vf_cfg;
2480
2481 be_vf_eth_addr_generate(adapter, mac);
2482
11ac75ed 2483 for_all_vfs(adapter, vf_cfg, vf) {
2484 if (lancer_chip(adapter)) {
2485 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2486 } else {
2487 status = be_cmd_pmac_add(adapter, mac,
2488 vf_cfg->if_handle,
2489 &vf_cfg->pmac_id, vf + 1);
2490 }
2491
2492 if (status)
2493 dev_err(&adapter->pdev->dev,
590c391d 2494 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2495 else
11ac75ed 2496 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2497
2498 mac[5] += 1;
2499 }
2500 return status;
2501}
2502
f9449ab7 2503static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2504{
11ac75ed 2505 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2506 u32 vf;
2507
11ac75ed 2508 for_all_vfs(adapter, vf_cfg, vf) {
2509 if (lancer_chip(adapter))
2510 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2511 else
2512 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2513 vf_cfg->pmac_id, vf + 1);
f9449ab7 2514
2515 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2516 }
2517}
2518
2519static int be_clear(struct be_adapter *adapter)
2520{
11ac75ed 2521 if (sriov_enabled(adapter))
2522 be_vf_clear(adapter);
2523
2524 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2525
2526 be_mcc_queues_destroy(adapter);
2527 be_rx_queues_destroy(adapter);
2528 be_tx_queues_destroy(adapter);
2529
2530 /* tell fw we're done with firing cmds */
2531 be_cmd_fw_clean(adapter);
2532 return 0;
2533}
2534
2535static void be_vf_setup_init(struct be_adapter *adapter)
2536{
11ac75ed 2537 struct be_vf_cfg *vf_cfg;
2538 int vf;
2539
2540 for_all_vfs(adapter, vf_cfg, vf) {
2541 vf_cfg->if_handle = -1;
2542 vf_cfg->pmac_id = -1;
2543 }
2544}
2545
2546static int be_vf_setup(struct be_adapter *adapter)
2547{
11ac75ed 2548 struct be_vf_cfg *vf_cfg;
2549 u32 cap_flags, en_flags, vf;
2550 u16 lnk_speed;
2551 int status;
2552
2553 be_vf_setup_init(adapter);
2554
2555 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2556 BE_IF_FLAGS_MULTICAST;
11ac75ed 2557 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2558 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
11ac75ed 2559 &vf_cfg->if_handle, NULL, vf + 1);
2560 if (status)
2561 goto err;
2562 }
2563
2564 status = be_vf_eth_addr_config(adapter);
2565 if (status)
2566 goto err;
f9449ab7 2567
11ac75ed 2568 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2569 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
11ac75ed 2570 vf + 1);
2571 if (status)
2572 goto err;
11ac75ed 2573 vf_cfg->tx_rate = lnk_speed * 10;
2574 }
2575 return 0;
2576err:
2577 return status;
2578}
2579
2580static void be_setup_init(struct be_adapter *adapter)
2581{
2582 adapter->vlan_prio_bmap = 0xff;
2583 adapter->link_speed = -1;
2584 adapter->if_handle = -1;
2585 adapter->be3_native = false;
2586 adapter->promiscuous = false;
2587 adapter->eq_next_idx = 0;
2588}
2589
2590static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2591{
2592 u32 pmac_id;
2593 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2594 if (status != 0)
2595 goto do_none;
2596 status = be_cmd_mac_addr_query(adapter, mac,
2597 MAC_ADDRESS_TYPE_NETWORK,
2598 false, adapter->if_handle, pmac_id);
2599 if (status != 0)
2600 goto do_none;
2601 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2602 &adapter->pmac_id, 0);
2603do_none:
2604 return status;
2605}
2606
2607static int be_setup(struct be_adapter *adapter)
2608{
5fb379ee 2609 struct net_device *netdev = adapter->netdev;
f9449ab7 2610 u32 cap_flags, en_flags;
a54769f5 2611 u32 tx_fc, rx_fc;
293c4a7d 2612 int status, i;
ba343c77 2613 u8 mac[ETH_ALEN];
293c4a7d 2614 struct be_tx_obj *txo;
ba343c77 2615
30128031 2616 be_setup_init(adapter);
6b7c5b94 2617
f9449ab7 2618 be_cmd_req_native_mode(adapter);
73d540f2 2619
f9449ab7 2620 status = be_tx_queues_create(adapter);
6b7c5b94 2621 if (status != 0)
a54769f5 2622 goto err;
6b7c5b94 2623
f9449ab7 2624 status = be_rx_queues_create(adapter);
6b7c5b94 2625 if (status != 0)
a54769f5 2626 goto err;
6b7c5b94 2627
f9449ab7 2628 status = be_mcc_queues_create(adapter);
6b7c5b94 2629 if (status != 0)
a54769f5 2630 goto err;
6b7c5b94 2631
2632 memset(mac, 0, ETH_ALEN);
2633 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2634 true /*permanent */, 0, 0);
2635 if (status)
2636 return status;
2637 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2638 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2639
2640 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2641 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2642 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2643 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2644
2645 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2646 cap_flags |= BE_IF_FLAGS_RSS;
2647 en_flags |= BE_IF_FLAGS_RSS;
2648 }
2649 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2650 netdev->dev_addr, &adapter->if_handle,
2651 &adapter->pmac_id, 0);
5fb379ee 2652 if (status != 0)
a54769f5 2653 goto err;
6b7c5b94 2654
2655 for_all_tx_queues(adapter, txo, i) {
2656 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2657 if (status)
2658 goto err;
2659 }
2660
2661 /* The VF's permanent mac queried from card is incorrect.
 2662 * For BEx: Query the MAC configured by the PF using if_handle
2663 * For Lancer: Get and use mac_list to obtain mac address.
2664 */
2665 if (!be_physfn(adapter)) {
2666 if (lancer_chip(adapter))
2667 status = be_configure_mac_from_list(adapter, mac);
2668 else
2669 status = be_cmd_mac_addr_query(adapter, mac,
2670 MAC_ADDRESS_TYPE_NETWORK, false,
2671 adapter->if_handle, 0);
2672 if (!status) {
2673 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2674 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2675 }
2676 }
0dffc83e 2677
04b71175 2678 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2679
2680 status = be_vid_config(adapter, false, 0);
2681 if (status)
2682 goto err;
7ab8b0b4 2683
a54769f5 2684 be_set_rx_mode(adapter->netdev);
5fb379ee 2685
a54769f5 2686 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2687 /* For Lancer: It is legal for this cmd to fail on VF */
2688 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2689 goto err;
590c391d 2690
2691 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2692 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2693 adapter->rx_fc);
2694 /* For Lancer: It is legal for this cmd to fail on VF */
2695 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2696 goto err;
2697 }
2dc1deb6 2698
a54769f5 2699 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2700
11ac75ed 2701 if (sriov_enabled(adapter)) {
2702 status = be_vf_setup(adapter);
2703 if (status)
2704 goto err;
2705 }
2706
2707 return 0;
2708err:
2709 be_clear(adapter);
2710 return status;
2711}
6b7c5b94 2712
2713#ifdef CONFIG_NET_POLL_CONTROLLER
2714static void be_netpoll(struct net_device *netdev)
2715{
2716 struct be_adapter *adapter = netdev_priv(netdev);
2717 struct be_rx_obj *rxo;
2718 int i;
2719
2720 event_handle(adapter, &adapter->tx_eq, false);
2721 for_all_rx_queues(adapter, rxo, i)
2722 event_handle(adapter, &rxo->rx_eq, true);
2723}
2724#endif
2725
84517482 2726#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
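/* Decides whether the boot-code (redboot) flash region needs to be
 * rewritten by comparing the CRC already in flash with the last four
 * bytes of the new image; matching CRCs mean the region is skipped.
 */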
fa9a6fed 2727static bool be_flash_redboot(struct be_adapter *adapter,
2728 const u8 *p, u32 img_start, int image_size,
2729 int hdr_size)
2730{
2731 u32 crc_offset;
2732 u8 flashed_crc[4];
2733 int status;
2734
2735 crc_offset = hdr_size + img_start + image_size - 4;
2736
fa9a6fed 2737 p += crc_offset;
2738
2739 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2740 (image_size - 4));
2741 if (status) {
2742 dev_err(&adapter->pdev->dev,
2743 "could not get crc from flash, not flashing redboot\n");
2744 return false;
2745 }
2746
 2747 /* update redboot only if the CRC does not match */
2748 if (!memcmp(flashed_crc, p, 4))
2749 return false;
2750 else
2751 return true;
2752}
2753
2754static bool phy_flashing_required(struct be_adapter *adapter)
2755{
2756 int status = 0;
2757 struct be_phy_info phy_info;
2758
2759 status = be_cmd_get_phy_info(adapter, &phy_info);
2760 if (status)
2761 return false;
2762 if ((phy_info.phy_type == TN_8022) &&
2763 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2764 return true;
2765 }
2766 return false;
2767}
2768
3f0d4560 2769static int be_flash_data(struct be_adapter *adapter,
84517482 2770 const struct firmware *fw,
2771 struct be_dma_mem *flash_cmd, int num_of_images)
2772
84517482 2773{
2774 int status = 0, i, filehdr_size = 0;
2775 u32 total_bytes = 0, flash_op;
2776 int num_bytes;
2777 const u8 *p = fw->data;
2778 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2779 const struct flash_comp *pflashcomp;
9fe96934 2780 int num_comp;
3f0d4560 2781
306f1348 2782 static const struct flash_comp gen3_flash_types[10] = {
2783 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2784 FLASH_IMAGE_MAX_SIZE_g3},
2785 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2786 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2787 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2788 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2789 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2790 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2791 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2792 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2793 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2794 FLASH_IMAGE_MAX_SIZE_g3},
2795 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2796 FLASH_IMAGE_MAX_SIZE_g3},
2797 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2798 FLASH_IMAGE_MAX_SIZE_g3},
2799 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2800 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2801 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2802 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2803 };
215faf9c 2804 static const struct flash_comp gen2_flash_types[8] = {
2805 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2806 FLASH_IMAGE_MAX_SIZE_g2},
2807 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2808 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2809 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2810 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2811 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2812 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2813 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2814 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2815 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2816 FLASH_IMAGE_MAX_SIZE_g2},
2817 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2818 FLASH_IMAGE_MAX_SIZE_g2},
2819 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2820 FLASH_IMAGE_MAX_SIZE_g2}
2821 };
2822
2823 if (adapter->generation == BE_GEN3) {
2824 pflashcomp = gen3_flash_types;
2825 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2826 num_comp = ARRAY_SIZE(gen3_flash_types);
2827 } else {
2828 pflashcomp = gen2_flash_types;
2829 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2830 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2831 }
2832 for (i = 0; i < num_comp; i++) {
2833 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2834 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2835 continue;
2836 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2837 if (!phy_flashing_required(adapter))
2838 continue;
2839 }
2840 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2841 (!be_flash_redboot(adapter, fw->data,
2842 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2843 (num_of_images * sizeof(struct image_hdr)))))
2844 continue;
2845 p = fw->data;
2846 p += filehdr_size + pflashcomp[i].offset
2847 + (num_of_images * sizeof(struct image_hdr));
2848 if (p + pflashcomp[i].size > fw->data + fw->size)
2849 return -1;
2850 total_bytes = pflashcomp[i].size;
2851 while (total_bytes) {
2852 if (total_bytes > 32*1024)
2853 num_bytes = 32*1024;
2854 else
2855 num_bytes = total_bytes;
2856 total_bytes -= num_bytes;
2857 if (!total_bytes) {
2858 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2859 flash_op = FLASHROM_OPER_PHY_FLASH;
2860 else
2861 flash_op = FLASHROM_OPER_FLASH;
2862 } else {
2863 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2864 flash_op = FLASHROM_OPER_PHY_SAVE;
2865 else
2866 flash_op = FLASHROM_OPER_SAVE;
2867 }
2868 memcpy(req->params.data_buf, p, num_bytes);
2869 p += num_bytes;
2870 status = be_cmd_write_flashrom(adapter, flash_cmd,
2871 pflashcomp[i].optype, flash_op, num_bytes);
2872 if (status) {
2873 if ((status == ILLEGAL_IOCTL_REQ) &&
2874 (pflashcomp[i].optype ==
2875 IMG_TYPE_PHY_FW))
2876 break;
2877 dev_err(&adapter->pdev->dev,
2878 "cmd to write to flash rom failed.\n");
2879 return -1;
2880 }
84517482 2881 }
84517482 2882 }
2883 return 0;
2884}
2885
2886static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2887{
2888 if (fhdr == NULL)
2889 return 0;
2890 if (fhdr->build[0] == '3')
2891 return BE_GEN3;
2892 else if (fhdr->build[0] == '2')
2893 return BE_GEN2;
2894 else
2895 return 0;
2896}
2897
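/* Lancer firmware download (a sketch of the loop below): the image is
 * streamed to the "/prg" object in 32KB chunks through
 * lancer_cmd_write_object(), then committed with a final zero-length
 * write once every chunk has been accepted.
 */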
2898static int lancer_fw_download(struct be_adapter *adapter,
2899 const struct firmware *fw)
84517482 2900{
2901#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2902#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2903 struct be_dma_mem flash_cmd;
2904 const u8 *data_ptr = NULL;
2905 u8 *dest_image_ptr = NULL;
2906 size_t image_size = 0;
2907 u32 chunk_size = 0;
2908 u32 data_written = 0;
2909 u32 offset = 0;
2910 int status = 0;
2911 u8 add_status = 0;
84517482 2912
485bf569 2913 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2914 dev_err(&adapter->pdev->dev,
2915 "FW Image not properly aligned. "
2916 "Length must be 4 byte aligned.\n");
2917 status = -EINVAL;
2918 goto lancer_fw_exit;
2919 }
2920
2921 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2922 + LANCER_FW_DOWNLOAD_CHUNK;
2923 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2924 &flash_cmd.dma, GFP_KERNEL);
2925 if (!flash_cmd.va) {
2926 status = -ENOMEM;
2927 dev_err(&adapter->pdev->dev,
2928 "Memory allocation failure while flashing\n");
2929 goto lancer_fw_exit;
2930 }
84517482 2931
2932 dest_image_ptr = flash_cmd.va +
2933 sizeof(struct lancer_cmd_req_write_object);
2934 image_size = fw->size;
2935 data_ptr = fw->data;
2936
2937 while (image_size) {
2938 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2939
2940 /* Copy the image chunk content. */
2941 memcpy(dest_image_ptr, data_ptr, chunk_size);
2942
2943 status = lancer_cmd_write_object(adapter, &flash_cmd,
2944 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2945 &data_written, &add_status);
2946
2947 if (status)
2948 break;
2949
2950 offset += data_written;
2951 data_ptr += data_written;
2952 image_size -= data_written;
2953 }
2954
2955 if (!status) {
2956 /* Commit the FW written */
2957 status = lancer_cmd_write_object(adapter, &flash_cmd,
2958 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2959 &data_written, &add_status);
2960 }
2961
2962 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2963 flash_cmd.dma);
2964 if (status) {
2965 dev_err(&adapter->pdev->dev,
2966 "Firmware load error. "
2967 "Status code: 0x%x Additional Status: 0x%x\n",
2968 status, add_status);
2969 goto lancer_fw_exit;
2970 }
2971
2972 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2973lancer_fw_exit:
2974 return status;
2975}
2976
 2977static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2978{
2979 struct flash_file_hdr_g2 *fhdr;
2980 struct flash_file_hdr_g3 *fhdr3;
2981 struct image_hdr *img_hdr_ptr = NULL;
2982 struct be_dma_mem flash_cmd;
2983 const u8 *p;
2984 int status = 0, i = 0, num_imgs = 0;
2985
2986 p = fw->data;
3f0d4560 2987 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2988
84517482 2989 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2990 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2991 &flash_cmd.dma, GFP_KERNEL);
2992 if (!flash_cmd.va) {
2993 status = -ENOMEM;
2994 dev_err(&adapter->pdev->dev,
2995 "Memory allocation failure while flashing\n");
485bf569 2996 goto be_fw_exit;
2997 }
2998
2999 if ((adapter->generation == BE_GEN3) &&
3000 (get_ufigen_type(fhdr) == BE_GEN3)) {
3001 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3002 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3003 for (i = 0; i < num_imgs; i++) {
3004 img_hdr_ptr = (struct image_hdr *) (fw->data +
3005 (sizeof(struct flash_file_hdr_g3) +
3006 i * sizeof(struct image_hdr)));
3007 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3008 status = be_flash_data(adapter, fw, &flash_cmd,
3009 num_imgs);
3010 }
3011 } else if ((adapter->generation == BE_GEN2) &&
3012 (get_ufigen_type(fhdr) == BE_GEN2)) {
3013 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3014 } else {
3015 dev_err(&adapter->pdev->dev,
3016 "UFI and Interface are not compatible for flashing\n");
3017 status = -1;
3018 }
3019
3020 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3021 flash_cmd.dma);
3022 if (status) {
3023 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3024 goto be_fw_exit;
3025 }
3026
af901ca1 3027 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3028
3029be_fw_exit:
3030 return status;
3031}
3032
3033int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3034{
3035 const struct firmware *fw;
3036 int status;
3037
3038 if (!netif_running(adapter->netdev)) {
3039 dev_err(&adapter->pdev->dev,
3040 "Firmware load not allowed (interface is down)\n");
3041 return -1;
3042 }
3043
3044 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3045 if (status)
3046 goto fw_exit;
3047
3048 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3049
3050 if (lancer_chip(adapter))
3051 status = lancer_fw_download(adapter, fw);
3052 else
3053 status = be_fw_download(adapter, fw);
3054
3055fw_exit:
3056 release_firmware(fw);
3057 return status;
3058}
3059
3060static struct net_device_ops be_netdev_ops = {
3061 .ndo_open = be_open,
3062 .ndo_stop = be_close,
3063 .ndo_start_xmit = be_xmit,
a54769f5 3064 .ndo_set_rx_mode = be_set_rx_mode,
3065 .ndo_set_mac_address = be_mac_addr_set,
3066 .ndo_change_mtu = be_change_mtu,
ab1594e9 3067 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3068 .ndo_validate_addr = eth_validate_addr,
3069 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3070 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3071 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3072 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3073 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3074 .ndo_get_vf_config = be_get_vf_config,
3075#ifdef CONFIG_NET_POLL_CONTROLLER
3076 .ndo_poll_controller = be_netpoll,
3077#endif
3078};
3079
3080static void be_netdev_init(struct net_device *netdev)
3081{
3082 struct be_adapter *adapter = netdev_priv(netdev);
3083 struct be_rx_obj *rxo;
3084 int i;
6b7c5b94 3085
6332c8d3 3086 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3087 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3088 NETIF_F_HW_VLAN_TX;
3089 if (be_multi_rxq(adapter))
3090 netdev->hw_features |= NETIF_F_RXHASH;
3091
3092 netdev->features |= netdev->hw_features |
8b8ddc68 3093 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3094
eb8a50d9 3095 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3096 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3097
3098 netdev->flags |= IFF_MULTICAST;
3099
3100 netif_set_gso_max_size(netdev, 65535);
3101
3102 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3103
3104 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3105
3106 for_all_rx_queues(adapter, rxo, i)
3107 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3108 BE_NAPI_WEIGHT);
3109
5fb379ee 3110 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 3111 BE_NAPI_WEIGHT);
3112}
3113
3114static void be_unmap_pci_bars(struct be_adapter *adapter)
3115{
3116 if (adapter->csr)
3117 iounmap(adapter->csr);
3118 if (adapter->db)
3119 iounmap(adapter->db);
3120}
3121
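/* BAR usage differs per chip: Lancer maps everything through BAR 0,
 * while BE2/BE3 map the CSR space through BAR 2 (PF only) and the
 * doorbells through BAR 4 (BAR 0 for BE3 VFs).
 */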
3122static int be_map_pci_bars(struct be_adapter *adapter)
3123{
3124 u8 __iomem *addr;
db3ea781 3125 int db_reg;
6b7c5b94 3126
3127 if (lancer_chip(adapter)) {
3128 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3129 pci_resource_len(adapter->pdev, 0));
3130 if (addr == NULL)
3131 return -ENOMEM;
3132 adapter->db = addr;
3133 return 0;
3134 }
3135
3136 if (be_physfn(adapter)) {
3137 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3138 pci_resource_len(adapter->pdev, 2));
3139 if (addr == NULL)
3140 return -ENOMEM;
3141 adapter->csr = addr;
3142 }
6b7c5b94 3143
ba343c77 3144 if (adapter->generation == BE_GEN2) {
3145 db_reg = 4;
3146 } else {
3147 if (be_physfn(adapter))
3148 db_reg = 4;
3149 else
3150 db_reg = 0;
3151 }
3152 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3153 pci_resource_len(adapter->pdev, db_reg));
3154 if (addr == NULL)
3155 goto pci_map_err;
3156 adapter->db = addr;
3157
3158 return 0;
3159pci_map_err:
3160 be_unmap_pci_bars(adapter);
3161 return -ENOMEM;
3162}
3163
3164
3165static void be_ctrl_cleanup(struct be_adapter *adapter)
3166{
8788fdc2 3167 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3168
3169 be_unmap_pci_bars(adapter);
3170
3171 if (mem->va)
3172 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3173 mem->dma);
e7b909a6 3174
5b8821b7 3175 mem = &adapter->rx_filter;
e7b909a6 3176 if (mem->va)
3177 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3178 mem->dma);
3179}
3180
3181static int be_ctrl_init(struct be_adapter *adapter)
3182{
3183 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3184 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3185 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3186 int status;
3187
3188 status = be_map_pci_bars(adapter);
3189 if (status)
e7b909a6 3190 goto done;
6b7c5b94
SP
3191
3192 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3193 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3194 mbox_mem_alloc->size,
3195 &mbox_mem_alloc->dma,
3196 GFP_KERNEL);
6b7c5b94 3197 if (!mbox_mem_alloc->va) {
3198 status = -ENOMEM;
3199 goto unmap_pci_bars;
3200 }
3201 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3202 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3203 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3204 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3205
3206 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3207 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3208 &rx_filter->dma, GFP_KERNEL);
3209 if (rx_filter->va == NULL) {
3210 status = -ENOMEM;
3211 goto free_mbox;
3212 }
5b8821b7 3213 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3214
2984961c 3215 mutex_init(&adapter->mbox_lock);
3216 spin_lock_init(&adapter->mcc_lock);
3217 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3218
dd131e76 3219 init_completion(&adapter->flash_compl);
cf588477 3220 pci_save_state(adapter->pdev);
6b7c5b94 3221 return 0;
3222
3223free_mbox:
3224 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3225 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3226
3227unmap_pci_bars:
3228 be_unmap_pci_bars(adapter);
3229
3230done:
3231 return status;
3232}
3233
3234static void be_stats_cleanup(struct be_adapter *adapter)
3235{
3abcdeda 3236 struct be_dma_mem *cmd = &adapter->stats_cmd;
3237
3238 if (cmd->va)
3239 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3240 cmd->va, cmd->dma);
3241}
3242
3243static int be_stats_init(struct be_adapter *adapter)
3244{
3abcdeda 3245 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3246
005d5696 3247 if (adapter->generation == BE_GEN2) {
89a88ab8 3248 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3249 } else {
3250 if (lancer_chip(adapter))
3251 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3252 else
3253 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3254 }
3255 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3256 GFP_KERNEL);
3257 if (cmd->va == NULL)
3258 return -1;
d291b9af 3259 memset(cmd->va, 0, cmd->size);
3260 return 0;
3261}
3262
3263static void __devexit be_remove(struct pci_dev *pdev)
3264{
3265 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3266
3267 if (!adapter)
3268 return;
3269
3270 cancel_delayed_work_sync(&adapter->work);
3271
3272 unregister_netdev(adapter->netdev);
3273
3274 be_clear(adapter);
3275
3276 be_stats_cleanup(adapter);
3277
3278 be_ctrl_cleanup(adapter);
3279
3280 be_sriov_disable(adapter);
3281
8d56ff11 3282 be_msix_disable(adapter);
3283
3284 pci_set_drvdata(pdev, NULL);
3285 pci_release_regions(pdev);
3286 pci_disable_device(pdev);
3287
3288 free_netdev(adapter->netdev);
3289}
3290
2243e2e9 3291static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3292{
3293 int status;
3294
3295 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3296 &adapter->function_mode, &adapter->function_caps);
3297 if (status)
3298 return status;
3299
752961a1 3300 if (adapter->function_mode & FLEX10_MODE)
3301 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3302 else
3303 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3304
3305 status = be_cmd_get_cntl_attributes(adapter);
3306 if (status)
3307 return status;
3308
2243e2e9 3309 return 0;
3310}
3311
3312static int be_dev_family_check(struct be_adapter *adapter)
3313{
3314 struct pci_dev *pdev = adapter->pdev;
3315 u32 sli_intf = 0, if_type;
3316
3317 switch (pdev->device) {
3318 case BE_DEVICE_ID1:
3319 case OC_DEVICE_ID1:
3320 adapter->generation = BE_GEN2;
3321 break;
3322 case BE_DEVICE_ID2:
3323 case OC_DEVICE_ID2:
ecedb6ae 3324 case OC_DEVICE_ID5:
3325 adapter->generation = BE_GEN3;
3326 break;
3327 case OC_DEVICE_ID3:
12f4d0a8 3328 case OC_DEVICE_ID4:
3329 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3330 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3331 SLI_INTF_IF_TYPE_SHIFT;
3332
3333 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3334 if_type != 0x02) {
3335 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3336 return -EINVAL;
3337 }
3338 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3339 SLI_INTF_FAMILY_SHIFT);
3340 adapter->generation = BE_GEN3;
3341 break;
3342 default:
3343 adapter->generation = 0;
3344 }
3345 return 0;
3346}
3347
3348static int lancer_wait_ready(struct be_adapter *adapter)
3349{
d8110f62 3350#define SLIPORT_READY_TIMEOUT 30
3351 u32 sliport_status;
3352 int status = 0, i;
3353
3354 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3355 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3356 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3357 break;
3358
d8110f62 3359 msleep(1000);
3360 }
3361
3362 if (i == SLIPORT_READY_TIMEOUT)
3363 status = -1;
3364
3365 return status;
3366}
3367
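/* If SLIPORT reports an error together with "reset needed", a reset
 * is requested through SLIPORT_CONTROL and the port is re-polled for
 * readiness; any error bits that persist fail the operation.
 */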
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

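/* Called from the worker: if the Lancer function reports an error
 * state, reset it and rebuild the rings/filters with be_clear() and
 * be_setup(), reopening the interface if it was running.
 */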
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

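/* Periodic worker (rescheduled every second): checks for Lancer
 * function errors and unrecoverable errors, reaps MCC completions while
 * the interface is down, refreshes stats, adapts the RX EQ delay and
 * replenishes any starved RX queues.
 */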
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

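/* PCI probe handler: enable the PCI device, allocate the netdev, detect
 * the ASIC family, sync with firmware (POST, init, function reset),
 * enable MSI-X, create the queues via be_setup() and register the
 * netdev. Each error path unwinds everything done before it.
 */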
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

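/* PM suspend handler: stop the worker, arm wake-on-LAN if configured,
 * close the interface and tear down queues and MSI-X before saving PCI
 * state and powering the device down.
 */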
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

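/* PM resume handler: power the device back up, re-enable MSI-X and
 * firmware command support, rebuild the queues, reopen the interface
 * and restart the worker.
 */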
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

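/* EEH callback: a PCI channel error was detected. Quiesce the interface
 * and release the device; ask the EEH core for a slot reset unless the
 * failure is permanent.
 */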
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

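/* EEH callback: the slot has been reset. Re-enable the device, clear
 * the error flags and verify via POST that the card and firmware are
 * healthy again.
 */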
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

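/* EEH callback: traffic may flow again. Re-init firmware command
 * support, rebuild the queues and reopen the interface if it was
 * running.
 */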
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

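/* Module init: validate the rx_frag_size parameter (must be 2048, 4096
 * or 8192 bytes; falls back to 2048) and register the PCI driver.
 */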
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);