be2net: remove wrong and unnecessary calls to netif_carrier_off()
drivers/net/benet/be_main.c [linux-2.6-block.git]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

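/* Both parameters are S_IRUGO (read-only via sysfs), so they can only be
 * set at module load time, e.g. with a typical invocation (illustrative,
 * not from this source):
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 */
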
6b7c5b94 36static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
6b7c5b94
SP
43 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 46/* UE Status Low CSR */
42c8b11e 47static const char * const ue_status_low_desc[] = {
7c185276
AK
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81/* UE Status High CSR */
42c8b11e 82static const char * const ue_status_hi_desc[] = {
7c185276
AK
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
42c8b11e 106 "NETC",
7c185276
AK
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

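/* The doorbell helpers below all follow the same pattern: the queue id
 * goes in the low bits of the doorbell word and the count of newly
 * posted/processed entries is shifted in above it. As a worked example
 * (derived from the masks and shifts used below), posting 64 buffers to
 * RX queue 5 writes (5 | 64 << DB_RQ_NUM_POSTED_SHIFT) to
 * adapter->db + DB_RQ_OFFSET. The wmb() ensures the ring entries are
 * visible in memory before the doorbell makes them visible to hardware.
 */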
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

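/* Carrier state is toggled only here, on a real link-state transition.
 * This matches the commit subject above: extra netif_carrier_off() calls
 * elsewhere in the driver were wrong and unnecessary, since this handler
 * already keeps the carrier state in sync with adapter->link_up.
 */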
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

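/* Example of the count above: an skb with linear data and two page frags
 * needs 1 (hdr wrb) + 1 (linear) + 2 (frags) = 4 WRBs; had the total been
 * odd, a dummy WRB would be added on non-Lancer chips to keep the
 * per-packet WRB count even.
 */
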
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

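/* Note on the vlan_tag fixup above: if the 802.1p priority in the tag
 * (bits 15:13) is not set in adapter->vlan_prio_bmap, it is replaced with
 * adapter->recommended_prio. E.g. a tag of 0xA005 (prio 5, VID 5) with a
 * bmap allowing only prio 0 keeps VID 5 but carries the recommended
 * priority instead.
 */
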
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

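/* A worked example of the bounds above, assuming BE_MAX_JUMBO_FRAME_SIZE
 * is 9018 (its usual value in be.h): the largest accepted MTU is
 * 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - 18 = 9000 bytes.
 */
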
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

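/* be_rx_eqd_update() below implements adaptive interrupt coalescing: it
 * samples the RX packet rate roughly once a second and derives a new EQ
 * delay as (pps / 110000) << 3, clamped to [min_eqd, max_eqd] and forced
 * to 0 below a small threshold. E.g. at ~1.1M pkts/s the raw value is
 * 10 << 3 = 80 before clamping.
 */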
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

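/* The two parsers below extract the same logical fields from the two
 * hardware RX completion layouts; adapter->be3_native selects the v1
 * format in be_rx_compl_get(). Only the bit positions differ between
 * amap_eth_rx_compl_v0 and amap_eth_rx_compl_v1.
 */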
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

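/* Sizing example for the frag-posting scheme above: with the default
 * rx_frag_size of 2048 and 4K pages, big_page_size is 4096, so each page
 * is shared by two rx frags and last_page_user marks the second one,
 * which is the frag that finally unmaps the page in get_rx_page_info().
 */
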
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

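/* be_tx_compl_process() below walks the TX ring from txq->tail up to the
 * wrb index reported in the completion, unmapping each frag as it goes
 * (the header wrb is skipped; only the first data wrb is unmapped as a
 * single mapping), and returns the number of wrbs freed, including the
 * header wrb.
 */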
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

3c8def97 1615/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1616static int be_tx_queues_create(struct be_adapter *adapter)
1617{
1618 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1619 struct be_tx_obj *txo;
1620 u8 i;
6b7c5b94
SP
1621
1622 adapter->tx_eq.max_eqd = 0;
1623 adapter->tx_eq.min_eqd = 0;
1624 adapter->tx_eq.cur_eqd = 96;
1625 adapter->tx_eq.enable_aic = false;
3c8def97 1626
6b7c5b94 1627 eq = &adapter->tx_eq.q;
3c8def97
SP
1628 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1629 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1630 return -1;
1631
8788fdc2 1632 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1633 goto err;
ecd62107 1634 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1635
3c8def97
SP
1636 for_all_tx_queues(adapter, txo, i) {
1637 cq = &txo->cq;
1638 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1639 sizeof(struct be_eth_tx_compl)))
3c8def97 1640 goto err;
6b7c5b94 1641
3c8def97
SP
1642 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1643 goto err;
6b7c5b94 1644
3c8def97
SP
1645 q = &txo->q;
1646 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1647 sizeof(struct be_eth_wrb)))
1648 goto err;
6b7c5b94 1649
3c8def97
SP
1650 if (be_cmd_txq_create(adapter, q, cq))
1651 goto err;
1652 }
6b7c5b94
SP
1653 return 0;
1654
3c8def97
SP
1655err:
1656 be_tx_queues_destroy(adapter);
6b7c5b94
SP
1657 return -1;
1658}
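
/* All TX completion queues above hang off the single tx_eq; with
 * enable_aic false and cur_eqd fixed at 96, the TX event queue runs at a
 * constant interrupt delay rather than adaptive coalescing.
 */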

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
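
/* The 0x400 function-mode bit tested above appears to select a
 * multi-channel function mode (the same bit also gates max_vlans in
 * be_get_config()); multiple RX queues are requested only when the
 * function is RSS-capable, SR-IOV is off, and that mode is not set.
 */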

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues\n", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}
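
/* event_peek() only tests the valid bit of the entry at the EQ tail; it
 * does not consume the event. be_intx() below relies on this on Lancer,
 * which has no CEV_ISR register to read.
 */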

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
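
/* For non-Lancer chips, CEV_ISR0_OFFSET + (eq_id / 8) * CEV_ISR_SIZE
 * selects the ISR register covering this function's event queues, and
 * each EQ's pending bit is then tested with (1 << eq_idx) before its
 * events are handled.
 */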

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
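
/* be_poll_rx() follows the usual NAPI contract: when fewer than "budget"
 * completions were processed the poll is completed and the CQ is notified
 * with rearm set; otherwise the CQ is notified without rearming so NAPI
 * keeps polling with interrupts left disabled.
 */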

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
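
/* Unlike be_poll_rx(), this handler ignores "budget" (it drains all TX and
 * MCC completions in one pass) and always returns 1 after re-arming the
 * shared TX/MCC event queue via be_eq_notify().
 */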

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
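
/* A UE (unrecoverable error) bit is reported only when it is set in the
 * status register and not masked in the corresponding UE mask register
 * (status & ~mask above); the loops then walk the surviving bits LSB-first
 * to index the ue_status_low_desc[]/ue_status_hi_desc[] name tables.
 */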

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
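
/* A hedged sketch (not driver code) of the legacy pci_enable_msix()
 * contract relied on above: 0 means all requested vectors were granted, a
 * positive return is the number of vectors that could be granted (so the
 * caller may retry with that many), and a negative return is a hard
 * failure. Variable names here are hypothetical.
 */
#if 0
	ret = pci_enable_msix(pdev, entries, want);
	if (ret > 0)				/* fewer vectors available */
		ret = pci_enable_msix(pdev, entries, ret);
	if (ret == 0)
		;				/* MSI-X enabled */
#endif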

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
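
/* Queue 0 created above is the default (non-RSS) ring: rss_en is passed
 * only for i > 0, and be_cmd_rss_config() programs the rss_id of each of
 * the remaining num_rx_qs - 1 rings into the RSS indirection table.
 */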

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
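
/* Enabling WoL programs the magic-packet filter with the interface MAC
 * and flags both D3hot and D3cold as wake-capable; disabling re-runs the
 * same command with the all-zero MAC and clears the wake flags.
 */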

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	be_cmd_req_native_mode(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get CRC from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if CRC does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
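
/* The redboot component stores its CRC in the last 4 bytes of the image
 * within the UFI file (hence crc_offset above); be_flash_redboot()
 * compares that against the CRC read back from flash so an identical boot
 * image is not needlessly rewritten.
 */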

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
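
/* Each component is streamed to the card in 32KB pieces: intermediate
 * pieces are sent with FLASHROM_OPER_SAVE and only the final piece uses
 * FLASHROM_OPER_FLASH, the operation that actually flashes the component.
 */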

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
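
/* On Lancer the image is written in LANCER_FW_DOWNLOAD_CHUNK pieces to
 * the "/prg" object; the final zero-length lancer_cmd_write_object() call
 * at the accumulated offset commits the downloaded firmware.
 */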

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
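
/* BAR layout assumed by be_map_pci_bars(): Lancer exposes everything
 * through BAR 0; on GEN2 pcicfg is BAR 1 and the doorbells BAR 4, with the
 * PF additionally mapping CSR space from BAR 2; on GEN3 pcicfg is BAR 0
 * and the doorbells are BAR 4 for the PF or BAR 0 for a VF, whose pcicfg
 * view sits at SRIOV_VF_PCICFG_OFFSET inside the doorbell BAR.
 */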

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
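
/* The mailbox buffer is over-allocated by 16 bytes so that PTR_ALIGN()
 * can carve a 16-byte-aligned view (mbox_mem_align) out of the coherent
 * allocation, evidently because the device expects the mailbox address to
 * be 16-byte aligned.
 */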

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
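
/* lancer_wait_ready() polls SLIPORT_STATUS up to 500 times with a 20ms
 * sleep between reads, i.e. a ceiling of roughly 10 seconds. If both the
 * error and reset-needed bits are set, lancer_test_and_set_rdy_state()
 * kicks off a port reset through SLIPORT_CONTROL and re-polls before
 * giving up.
 */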

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3398
3399static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3400{
3401 struct be_adapter *adapter = pci_get_drvdata(pdev);
3402 struct net_device *netdev = adapter->netdev;
3403
a4ca055f 3404 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3405 if (adapter->wol)
3406 be_setup_wol(adapter, true);
3407
6b7c5b94
SP
3408 netif_device_detach(netdev);
3409 if (netif_running(netdev)) {
3410 rtnl_lock();
3411 be_close(netdev);
3412 rtnl_unlock();
3413 }
9e90c961 3414 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3415 be_clear(adapter);
6b7c5b94 3416
a4ca055f 3417 be_msix_disable(adapter);
6b7c5b94
SP
3418 pci_save_state(pdev);
3419 pci_disable_device(pdev);
3420 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3421 return 0;
3422}
3423
3424static int be_resume(struct pci_dev *pdev)
3425{
3426 int status = 0;
3427 struct be_adapter *adapter = pci_get_drvdata(pdev);
3428 struct net_device *netdev = adapter->netdev;
3429
3430 netif_device_detach(netdev);
3431
3432 status = pci_enable_device(pdev);
3433 if (status)
3434 return status;
3435
3436	pci_set_power_state(pdev, PCI_D0);
3437 pci_restore_state(pdev);
3438
a4ca055f 3439 be_msix_enable(adapter);
2243e2e9
SP
3440 /* tell fw we're ready to fire cmds */
3441 status = be_cmd_fw_init(adapter);
3442 if (status)
3443 return status;
3444
9b0365f1 3445	status = be_setup(adapter);
	if (status)
		return status;
6b7c5b94
SP
3446 if (netif_running(netdev)) {
3447 rtnl_lock();
3448 be_open(netdev);
3449 rtnl_unlock();
3450 }
3451 netif_device_attach(netdev);
71d8d1b5
AK
3452
3453 if (adapter->wol)
3454 be_setup_wol(adapter, false);
a4ca055f
AK
3455
3456 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3457 return 0;
3458}
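/*
 * Resume mirrors be_suspend() in reverse: return to D0 and restore
 * config space, re-enable MSI-X, re-init FW before issuing any further
 * cmds, rebuild queues with be_setup(), reopen the interface if it was
 * running, then disarm WoL and restart the worker.
 */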
3459
82456b03
SP
3460/*
3461 * An FLR (Function Level Reset) will stop BE from DMAing any data.
3462 */
3463static void be_shutdown(struct pci_dev *pdev)
3464{
3465 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3466
2d5d4154
AK
3467 if (!adapter)
3468 return;
82456b03 3469
0f4a6828 3470 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3471
2d5d4154 3472 netif_device_detach(adapter->netdev);
82456b03 3473
82456b03
SP
3474 if (adapter->wol)
3475 be_setup_wol(adapter, true);
3476
57841869
AK
3477 be_cmd_reset_function(adapter);
3478
82456b03 3479 pci_disable_device(pdev);
82456b03
SP
3480}
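/*
 * be_cmd_reset_function() issues the function reset referred to above,
 * so DMA is quiesced before control passes to the next kernel (e.g. on
 * kexec); this pairs with the kdump INTR workaround in be_probe().
 */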
3481
cf588477
SP
3482static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3483 pci_channel_state_t state)
3484{
3485 struct be_adapter *adapter = pci_get_drvdata(pdev);
3486 struct net_device *netdev = adapter->netdev;
3487
3488 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3489
3490 adapter->eeh_err = true;
3491
3492 netif_device_detach(netdev);
3493
3494 if (netif_running(netdev)) {
3495 rtnl_lock();
3496 be_close(netdev);
3497 rtnl_unlock();
3498 }
3499 be_clear(adapter);
3500
3501 if (state == pci_channel_io_perm_failure)
3502 return PCI_ERS_RESULT_DISCONNECT;
3503
3504 pci_disable_device(pdev);
3505
3506 return PCI_ERS_RESULT_NEED_RESET;
3507}
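/*
 * Per the PCI error-recovery contract, ->error_detected() must stop
 * all I/O on the channel; returning NEED_RESET asks the EEH core for
 * a slot reset, while DISCONNECT marks the device unrecoverable.
 */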
3508
3509static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3510{
3511 struct be_adapter *adapter = pci_get_drvdata(pdev);
3512 int status;
3513
3514 dev_info(&adapter->pdev->dev, "EEH reset\n");
3515 adapter->eeh_err = false;
3516
3517 status = pci_enable_device(pdev);
3518 if (status)
3519 return PCI_ERS_RESULT_DISCONNECT;
3520
3521 pci_set_master(pdev);
3522	pci_set_power_state(pdev, PCI_D0);
3523 pci_restore_state(pdev);
3524
3525 /* Check if card is ok and fw is ready */
3526 status = be_cmd_POST(adapter);
3527 if (status)
3528 return PCI_ERS_RESULT_DISCONNECT;
3529
3530 return PCI_ERS_RESULT_RECOVERED;
3531}
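/*
 * ->slot_reset() re-enables the device after the reset and uses
 * be_cmd_POST() to confirm FW is alive; returning RECOVERED lets the
 * EEH core proceed to ->resume().
 */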
3532
3533static void be_eeh_resume(struct pci_dev *pdev)
3534{
3535 int status = 0;
3536 struct be_adapter *adapter = pci_get_drvdata(pdev);
3537 struct net_device *netdev = adapter->netdev;
3538
3539 dev_info(&adapter->pdev->dev, "EEH resume\n");
3540
3541 pci_save_state(pdev);
3542
3543 /* tell fw we're ready to fire cmds */
3544 status = be_cmd_fw_init(adapter);
3545 if (status)
3546 goto err;
3547
3548 status = be_setup(adapter);
3549 if (status)
3550 goto err;
3551
3552 if (netif_running(netdev)) {
3553 status = be_open(netdev);
3554 if (status)
3555 goto err;
3556 }
3557 netif_device_attach(netdev);
3558 return;
3559err:
3560 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3561}
3562
3563static struct pci_error_handlers be_eeh_handlers = {
3564 .error_detected = be_eeh_err_detected,
3565 .slot_reset = be_eeh_reset,
3566 .resume = be_eeh_resume,
3567};
3568
6b7c5b94
SP
3569static struct pci_driver be_driver = {
3570 .name = DRV_NAME,
3571 .id_table = be_dev_ids,
3572 .probe = be_probe,
3573 .remove = be_remove,
3574 .suspend = be_suspend,
cf588477 3575 .resume = be_resume,
82456b03 3576 .shutdown = be_shutdown,
cf588477 3577 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3578};
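/*
 * Legacy PCI PM callbacks (.suspend/.resume) are used here rather than
 * dev_pm_ops; .err_handler wires in the EEH recovery hooks above.
 */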
3579
3580static int __init be_init_module(void)
3581{
8e95a202
JP
3582 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3583 rx_frag_size != 2048) {
6b7c5b94
SP
3584 printk(KERN_WARNING DRV_NAME
3585 " : Module param rx_frag_size must be 2048/4096/8192."
3586 " Using 2048\n");
3587 rx_frag_size = 2048;
3588 }
6b7c5b94
SP
3589
3590 return pci_register_driver(&be_driver);
3591}
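/*
 * rx_frag_size silently falls back to 2048 for any value other than
 * 2048/4096/8192. An illustrative load line (module name assumed to
 * be be2net):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */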
3592module_init(be_init_module);
3593
3594static void __exit be_exit_module(void)
3595{
3596 pci_unregister_driver(&be_driver);
3597}
3598module_exit(be_exit_module);