tcp: Fix build warning after tcp_{v4,v6}_init_sock consolidation.
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
6b7c5b94
SP
23
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
ba343c77 30static unsigned int num_vfs;
ba343c77 31module_param(num_vfs, uint, S_IRUGO);
ba343c77 32MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 33
11ac75ed
SP
/* Size of each RX buffer fragment posted to the hardware (read-only param) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs this driver binds to */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one descriptive string per bit position (0..31) */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one descriptive string per bit position (32..63) */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 119
752961a1
SP
120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
6b7c5b94
SP
127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 130 if (mem->va) {
2b7bcebf
IV
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
1cfafab9
SP
133 mem->va = NULL;
134 }
6b7c5b94
SP
135}
136
/* Allocate and zero DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each.  Returns 0 on success, -ENOMEM on failure.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	/* hardware expects the ring to start out zeroed */
	memset(mem->va, 0, mem->size);
	return 0;
}
153
8788fdc2 154static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 155{
db3ea781 156 u32 reg, enabled;
5f0b849e 157
cf588477
SP
158 if (adapter->eeh_err)
159 return;
160
db3ea781
SP
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
5f0b849e 165 if (!enabled && enable)
6b7c5b94 166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 167 else if (enabled && !enable)
6b7c5b94 168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 169 else
6b7c5b94 170 return;
5f0b849e 171
db3ea781
SP
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
174}
175
8788fdc2 176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
181
182 wmb();
8788fdc2 183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
184}
185
8788fdc2 186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
191
192 wmb();
8788fdc2 193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
194}
195
8788fdc2 196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
203
204 if (adapter->eeh_err)
205 return;
206
6b7c5b94
SP
207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
214}
215
8788fdc2 216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
222
223 if (adapter->eeh_err)
224 return;
225
6b7c5b94
SP
226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
230}
231
6b7c5b94
SP
/* ndo_set_mac_address handler: program a new primary MAC on the interface.
 * The new MAC is added *before* the old one is deleted so the interface is
 * never left without a valid address.  Returns 0 on success or a negative
 * errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry, deleted last */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* read back the MAC currently programmed in hardware */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram only if the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* old MAC removed only after the new one is in place */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
89a88ab8
AK
/* Copy BE2 (v0 layout) hardware stats from the FW stats command buffer
 * into the driver's generic drv_stats structure.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW fills the buffer in little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits these into address and vlan mismatch counters */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy BE3 (v1 layout) hardware stats from the FW stats command buffer
 * into the driver's generic drv_stats structure.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW fills the buffer in little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 tracks jabber events per port structure, not in the rxf block */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
005d5696
SX
/* Copy Lancer per-physical-port (pport) stats from the FW stats command
 * buffer into the driver's generic drv_stats structure.  Only the low
 * 32 bits (*_lo) of the 64-bit FW counters are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW fills the buffer in little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): same FW counter feeds both fifo-overflow stats here */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 396
09c1c68f
SP
397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
89a88ab8
AK
/* Parse the FW stats buffer into drv_stats using the layout matching the
 * chip generation, then accumulate the 16-bit per-ring erx drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
433
ab1594e9
SP
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver error counters into the rtnl stats structure.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* u64_stats seqcount loop: retry until we read a
		 * consistent pkts/bytes snapshot on 32-bit hosts
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
499
b236916a 500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 501{
6b7c5b94
SP
502 struct net_device *netdev = adapter->netdev;
503
b236916a 504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 505 netif_carrier_off(netdev);
b236916a 506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 507 }
b236916a
AK
508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
6b7c5b94
SP
513}
514
/* Account one transmitted skb in the per-TX-queue 64-bit stats.
 * @wrb_cnt:  WRB entries the skb consumed, @copied: bytes queued,
 * @gso_segs: segments for a GSO skb (0 for non-GSO => counted as 1 pkt),
 * @stopped:  whether this transmit stopped the subqueue.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	/* u64_stats section keeps readers (be_get_stats64) consistent */
	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
6b7c5b94 533{
ebc8d2ab
DM
534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
6b7c5b94
SP
538 /* to account for hdr wrb */
539 cnt++;
fe6d2a38
SP
540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
6b7c5b94
SP
543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
fe6d2a38 546 }
6b7c5b94
SP
547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549}
550
/* Fill one TX WRB with the split 64-bit DMA address and length of a frag */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
557
1ded132d
AK
558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560{
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572}
573
/* Build the TX header WRB: offload flags (LSO/checksum/vlan), total WRB
 * count and payload length for a transmit of 'skb'.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lancer does not take the lso6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon additionally needs explicit csum bits
		 * set alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
2b7bcebf 618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
619 bool unmap_single)
620{
621 dma_addr_t dma;
622
623 be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 626 if (wrb->frag_len) {
7101e111 627 if (unmap_single)
2b7bcebf
IV
628 dma_unmap_single(dev, dma, wrb->frag_len,
629 DMA_TO_DEVICE);
7101e111 630 else
2b7bcebf 631 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
632 }
633}
6b7c5b94 634
/* DMA-map 'skb' and write its WRBs (header + head + frags [+ dummy pad])
 * onto 'txq'.  Returns the number of payload bytes queued, or 0 on a DMA
 * mapping failure, in which case every mapping made so far is undone and
 * the queue head is rewound.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled in last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first WRB was dma_map_single'd */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length pad WRB to keep the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: walk the WRBs written so far and unmap each; only the
	 * very first one may have been a dma_map_single mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler: map the skb onto the TX ring, stop the subqueue
 * if the ring is close to full, and ring the TX doorbell.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* insert the tag into the frame itself instead */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the ring and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761static int be_change_mtu(struct net_device *netdev, int new_mtu)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
765 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
767 dev_info(&adapter->pdev->dev,
768 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
769 BE_MIN_MTU,
770 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
771 return -EINVAL;
772 }
773 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774 netdev->mtu, new_mtu);
775 netdev->mtu = new_mtu;
776 return 0;
777}
778
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * When 'vf' is true, only program vf_cfg[vf_num]'s single vlan tag.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vids: fall back to vlan-promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
817
/* ndo_vlan_rx_add_vid handler: record 'vid' and reprogram the HW vlan
 * table.  On failure the local vlan_tag entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* only the physical function may manage vlan filters */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): uses (max_vlans + 1) here while the remove path
	 * compares against max_vlans — confirm the asymmetry is intended
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
839
/* ndo_vlan_rx_kill_vid handler: clear 'vid' and reprogram the HW vlan
 * table.  On failure the local vlan_tag entry is restored.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* only the physical function may manage vlan filters */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore on failure */
ret:
	return status;
}
861
/* ndo_set_rx_mode handler: program promiscuous / allmulti / unicast and
 * multicast filters in firmware to match the netdev's current state.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-apply the vlan filters promiscuity had bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* resync the FW unicast MAC list with the netdev's uc list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop every previously programmed secondary uc MAC */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc MACs for the pmac table: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
915
ba343c77
SB
916static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
917{
918 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 919 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
920 int status;
921
11ac75ed 922 if (!sriov_enabled(adapter))
ba343c77
SB
923 return -EPERM;
924
11ac75ed 925 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
926 return -EINVAL;
927
590c391d
PR
928 if (lancer_chip(adapter)) {
929 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
930 } else {
11ac75ed
SP
931 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
932 vf_cfg->pmac_id, vf + 1);
ba343c77 933
11ac75ed
SP
934 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
935 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
936 }
937
64600ea5 938 if (status)
ba343c77
SB
939 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
940 mac, vf);
64600ea5 941 else
11ac75ed 942 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 943
ba343c77
SB
944 return status;
945}
946
64600ea5
AK
947static int be_get_vf_config(struct net_device *netdev, int vf,
948 struct ifla_vf_info *vi)
949{
950 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 951 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 952
11ac75ed 953 if (!sriov_enabled(adapter))
64600ea5
AK
954 return -EPERM;
955
11ac75ed 956 if (vf >= adapter->num_vfs)
64600ea5
AK
957 return -EINVAL;
958
959 vi->vf = vf;
11ac75ed
SP
960 vi->tx_rate = vf_cfg->tx_rate;
961 vi->vlan = vf_cfg->vlan_tag;
64600ea5 962 vi->qos = 0;
11ac75ed 963 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
964
965 return 0;
966}
967
1da87b7f
AK
968static int be_set_vf_vlan(struct net_device *netdev,
969 int vf, u16 vlan, u8 qos)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
972 int status = 0;
973
11ac75ed 974 if (!sriov_enabled(adapter))
1da87b7f
AK
975 return -EPERM;
976
11ac75ed 977 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
978 return -EINVAL;
979
980 if (vlan) {
f1f3ee1b
AK
981 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
982 /* If this is new value, program it. Else skip. */
983 adapter->vf_cfg[vf].vlan_tag = vlan;
984
985 status = be_cmd_set_hsw_config(adapter, vlan,
986 vf + 1, adapter->vf_cfg[vf].if_handle);
987 }
1da87b7f 988 } else {
f1f3ee1b 989 /* Reset Transparent Vlan Tagging. */
11ac75ed 990 adapter->vf_cfg[vf].vlan_tag = 0;
f1f3ee1b
AK
991 vlan = adapter->vf_cfg[vf].def_vid;
992 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
993 adapter->vf_cfg[vf].if_handle);
1da87b7f
AK
994 }
995
1da87b7f
AK
996
997 if (status)
998 dev_info(&adapter->pdev->dev,
999 "VLAN %d config on VF %d failed\n", vlan, vf);
1000 return status;
1001}
1002
e1d18735
AK
1003static int be_set_vf_tx_rate(struct net_device *netdev,
1004 int vf, int rate)
1005{
1006 struct be_adapter *adapter = netdev_priv(netdev);
1007 int status = 0;
1008
11ac75ed 1009 if (!sriov_enabled(adapter))
e1d18735
AK
1010 return -EPERM;
1011
94f434c2 1012 if (vf >= adapter->num_vfs)
e1d18735
AK
1013 return -EINVAL;
1014
94f434c2
AK
1015 if (rate < 100 || rate > 10000) {
1016 dev_err(&adapter->pdev->dev,
1017 "tx rate must be between 100 and 10000 Mbps\n");
1018 return -EINVAL;
1019 }
e1d18735 1020
856c4012 1021 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1022
1023 if (status)
94f434c2 1024 dev_err(&adapter->pdev->dev,
e1d18735 1025 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1026 else
1027 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1028 return status;
1029}
1030
10ef9ab4 1031static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 1032{
10ef9ab4 1033 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
4097f663 1034 ulong now = jiffies;
ac124ff9 1035 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
1036 u64 pkts;
1037 unsigned int start, eqd;
ac124ff9 1038
10ef9ab4
SP
1039 if (!eqo->enable_aic) {
1040 eqd = eqo->eqd;
1041 goto modify_eqd;
1042 }
1043
1044 if (eqo->idx >= adapter->num_rx_qs)
ac124ff9 1045 return;
6b7c5b94 1046
10ef9ab4
SP
1047 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1048
4097f663 1049 /* Wrapped around */
3abcdeda
SP
1050 if (time_before(now, stats->rx_jiffies)) {
1051 stats->rx_jiffies = now;
4097f663
SP
1052 return;
1053 }
6b7c5b94 1054
ac124ff9
SP
1055 /* Update once a second */
1056 if (delta < HZ)
6b7c5b94
SP
1057 return;
1058
ab1594e9
SP
1059 do {
1060 start = u64_stats_fetch_begin_bh(&stats->sync);
1061 pkts = stats->rx_pkts;
1062 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1063
68c3e5a7 1064 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1065 stats->rx_pkts_prev = pkts;
3abcdeda 1066 stats->rx_jiffies = now;
10ef9ab4
SP
1067 eqd = (stats->rx_pps / 110000) << 3;
1068 eqd = min(eqd, eqo->max_eqd);
1069 eqd = max(eqd, eqo->min_eqd);
ac124ff9
SP
1070 if (eqd < 10)
1071 eqd = 0;
10ef9ab4
SP
1072
1073modify_eqd:
1074 if (eqd != eqo->cur_eqd) {
1075 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1076 eqo->cur_eqd = eqd;
ac124ff9 1077 }
6b7c5b94
SP
1078}
1079
3abcdeda 1080static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1081 struct be_rx_compl_info *rxcp)
4097f663 1082{
ac124ff9 1083 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1084
ab1594e9 1085 u64_stats_update_begin(&stats->sync);
3abcdeda 1086 stats->rx_compl++;
2e588f84 1087 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1088 stats->rx_pkts++;
2e588f84 1089 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1090 stats->rx_mcast_pkts++;
2e588f84 1091 if (rxcp->err)
ac124ff9 1092 stats->rx_compl_err++;
ab1594e9 1093 u64_stats_update_end(&stats->sync);
4097f663
SP
1094}
1095
2e588f84 1096static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1097{
19fad86f
PR
1098 /* L4 checksum is not reliable for non TCP/UDP packets.
1099 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1100 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1101 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1102}
1103
10ef9ab4
SP
1104static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1105 u16 frag_idx)
6b7c5b94 1106{
10ef9ab4 1107 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1108 struct be_rx_page_info *rx_page_info;
3abcdeda 1109 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1110
3abcdeda 1111 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1112 BUG_ON(!rx_page_info->page);
1113
205859a2 1114 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1115 dma_unmap_page(&adapter->pdev->dev,
1116 dma_unmap_addr(rx_page_info, bus),
1117 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1118 rx_page_info->last_page_user = false;
1119 }
6b7c5b94
SP
1120
1121 atomic_dec(&rxq->used);
1122 return rx_page_info;
1123}
1124
1125/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1126static void be_rx_compl_discard(struct be_rx_obj *rxo,
1127 struct be_rx_compl_info *rxcp)
6b7c5b94 1128{
3abcdeda 1129 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1130 struct be_rx_page_info *page_info;
2e588f84 1131 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1132
e80d9da6 1133 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1134 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1135 put_page(page_info->page);
1136 memset(page_info, 0, sizeof(*page_info));
2e588f84 1137 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1138 }
1139}
1140
1141/*
1142 * skb_fill_rx_data forms a complete skb for an ether frame
1143 * indicated by rxcp.
1144 */
10ef9ab4
SP
1145static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1146 struct be_rx_compl_info *rxcp)
6b7c5b94 1147{
3abcdeda 1148 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1149 struct be_rx_page_info *page_info;
2e588f84
SP
1150 u16 i, j;
1151 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1152 u8 *start;
6b7c5b94 1153
10ef9ab4 1154 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1155 start = page_address(page_info->page) + page_info->page_offset;
1156 prefetch(start);
1157
1158 /* Copy data in the first descriptor of this completion */
2e588f84 1159 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94
SP
1160
1161 /* Copy the header portion into skb_data */
2e588f84 1162 hdr_len = min(BE_HDR_LEN, curr_frag_len);
6b7c5b94
SP
1163 memcpy(skb->data, start, hdr_len);
1164 skb->len = curr_frag_len;
1165 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1166 /* Complete packet has now been moved to data */
1167 put_page(page_info->page);
1168 skb->data_len = 0;
1169 skb->tail += curr_frag_len;
1170 } else {
1171 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1172 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1173 skb_shinfo(skb)->frags[0].page_offset =
1174 page_info->page_offset + hdr_len;
9e903e08 1175 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1176 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1177 skb->truesize += rx_frag_size;
6b7c5b94
SP
1178 skb->tail += hdr_len;
1179 }
205859a2 1180 page_info->page = NULL;
6b7c5b94 1181
2e588f84
SP
1182 if (rxcp->pkt_size <= rx_frag_size) {
1183 BUG_ON(rxcp->num_rcvd != 1);
1184 return;
6b7c5b94
SP
1185 }
1186
1187 /* More frags present for this completion */
2e588f84
SP
1188 index_inc(&rxcp->rxq_idx, rxq->len);
1189 remaining = rxcp->pkt_size - curr_frag_len;
1190 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
10ef9ab4 1191 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
2e588f84 1192 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1193
bd46cb6c
AK
1194 /* Coalesce all frags from the same physical page in one slot */
1195 if (page_info->page_offset == 0) {
1196 /* Fresh page */
1197 j++;
b061b39e 1198 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1199 skb_shinfo(skb)->frags[j].page_offset =
1200 page_info->page_offset;
9e903e08 1201 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1202 skb_shinfo(skb)->nr_frags++;
1203 } else {
1204 put_page(page_info->page);
1205 }
1206
9e903e08 1207 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1208 skb->len += curr_frag_len;
1209 skb->data_len += curr_frag_len;
bdb28a97 1210 skb->truesize += rx_frag_size;
2e588f84
SP
1211 remaining -= curr_frag_len;
1212 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1213 page_info->page = NULL;
6b7c5b94 1214 }
bd46cb6c 1215 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1216}
1217
5be93b9a 1218/* Process the RX completion indicated by rxcp when GRO is disabled */
10ef9ab4
SP
1219static void be_rx_compl_process(struct be_rx_obj *rxo,
1220 struct be_rx_compl_info *rxcp)
6b7c5b94 1221{
10ef9ab4 1222 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1223 struct net_device *netdev = adapter->netdev;
6b7c5b94 1224 struct sk_buff *skb;
89420424 1225
bb349bb4 1226 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1227 if (unlikely(!skb)) {
ac124ff9 1228 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1229 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1230 return;
1231 }
1232
10ef9ab4 1233 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1234
6332c8d3 1235 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1236 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1237 else
1238 skb_checksum_none_assert(skb);
6b7c5b94 1239
6332c8d3 1240 skb->protocol = eth_type_trans(skb, netdev);
10ef9ab4 1241 if (netdev->features & NETIF_F_RXHASH)
4b972914
AK
1242 skb->rxhash = rxcp->rss_hash;
1243
6b7c5b94 1244
343e43c0 1245 if (rxcp->vlanf)
4c5102f9
AK
1246 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1247
1248 netif_receive_skb(skb);
6b7c5b94
SP
1249}
1250
5be93b9a 1251/* Process the RX completion indicated by rxcp when GRO is enabled */
10ef9ab4
SP
1252void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1253 struct be_rx_compl_info *rxcp)
6b7c5b94 1254{
10ef9ab4 1255 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1256 struct be_rx_page_info *page_info;
5be93b9a 1257 struct sk_buff *skb = NULL;
3abcdeda 1258 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1259 u16 remaining, curr_frag_len;
1260 u16 i, j;
3968fa1e 1261
10ef9ab4 1262 skb = napi_get_frags(napi);
5be93b9a 1263 if (!skb) {
10ef9ab4 1264 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1265 return;
1266 }
1267
2e588f84
SP
1268 remaining = rxcp->pkt_size;
1269 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1270 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1271
1272 curr_frag_len = min(remaining, rx_frag_size);
1273
bd46cb6c
AK
1274 /* Coalesce all frags from the same physical page in one slot */
1275 if (i == 0 || page_info->page_offset == 0) {
1276 /* First frag or Fresh page */
1277 j++;
b061b39e 1278 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1279 skb_shinfo(skb)->frags[j].page_offset =
1280 page_info->page_offset;
9e903e08 1281 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1282 } else {
1283 put_page(page_info->page);
1284 }
9e903e08 1285 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1286 skb->truesize += rx_frag_size;
bd46cb6c 1287 remaining -= curr_frag_len;
2e588f84 1288 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1289 memset(page_info, 0, sizeof(*page_info));
1290 }
bd46cb6c 1291 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1292
5be93b9a 1293 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1294 skb->len = rxcp->pkt_size;
1295 skb->data_len = rxcp->pkt_size;
5be93b9a 1296 skb->ip_summed = CHECKSUM_UNNECESSARY;
4b972914
AK
1297 if (adapter->netdev->features & NETIF_F_RXHASH)
1298 skb->rxhash = rxcp->rss_hash;
5be93b9a 1299
343e43c0 1300 if (rxcp->vlanf)
4c5102f9
AK
1301 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1302
10ef9ab4 1303 napi_gro_frags(napi);
2e588f84
SP
1304}
1305
10ef9ab4
SP
1306static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
2e588f84
SP
1308{
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914
AK
1327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
15d72184
SP
1329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1333 compl);
15d72184 1334 }
12004ae9 1335 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1336}
1337
10ef9ab4
SP
1338static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1339 struct be_rx_compl_info *rxcp)
2e588f84
SP
1340{
1341 rxcp->pkt_size =
1342 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1343 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1344 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1345 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1346 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1347 rxcp->ip_csum =
1348 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1349 rxcp->l4_csum =
1350 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1351 rxcp->ipv6 =
1352 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1353 rxcp->rxq_idx =
1354 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1355 rxcp->num_rcvd =
1356 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1357 rxcp->pkt_type =
1358 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914
AK
1359 rxcp->rss_hash =
1360 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
15d72184
SP
1361 if (rxcp->vlanf) {
1362 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1363 compl);
1364 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1365 compl);
15d72184 1366 }
12004ae9 1367 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1368}
1369
1370static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1371{
1372 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1373 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1374 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1375
2e588f84
SP
1376 /* For checking the valid bit it is Ok to use either definition as the
1377 * valid bit is at the same position in both v0 and v1 Rx compl */
1378 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1379 return NULL;
6b7c5b94 1380
2e588f84
SP
1381 rmb();
1382 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1383
2e588f84 1384 if (adapter->be3_native)
10ef9ab4 1385 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1386 else
10ef9ab4 1387 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1388
15d72184
SP
1389 if (rxcp->vlanf) {
1390 /* vlanf could be wrongly set in some cards.
1391 * ignore if vtm is not set */
752961a1 1392 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1393 rxcp->vlanf = 0;
6b7c5b94 1394
15d72184 1395 if (!lancer_chip(adapter))
3c709f8f 1396 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1397
939cf306 1398 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1399 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1400 rxcp->vlanf = 0;
1401 }
2e588f84
SP
1402
1403 /* As the compl has been parsed, reset it; we wont touch it again */
1404 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1405
3abcdeda 1406 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1407 return rxcp;
1408}
1409
1829b086 1410static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1411{
6b7c5b94 1412 u32 order = get_order(size);
1829b086 1413
6b7c5b94 1414 if (order > 0)
1829b086
ED
1415 gfp |= __GFP_COMP;
1416 return alloc_pages(gfp, order);
6b7c5b94
SP
1417}
1418
1419/*
1420 * Allocate a page, split it to fragments of size rx_frag_size and post as
1421 * receive buffers to BE
1422 */
1829b086 1423static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1424{
3abcdeda 1425 struct be_adapter *adapter = rxo->adapter;
26d92f92 1426 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1427 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1428 struct page *pagep = NULL;
1429 struct be_eth_rx_d *rxd;
1430 u64 page_dmaaddr = 0, frag_dmaaddr;
1431 u32 posted, page_offset = 0;
1432
3abcdeda 1433 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1434 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1435 if (!pagep) {
1829b086 1436 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1437 if (unlikely(!pagep)) {
ac124ff9 1438 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1439 break;
1440 }
2b7bcebf
IV
1441 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1442 0, adapter->big_page_size,
1443 DMA_FROM_DEVICE);
6b7c5b94
SP
1444 page_info->page_offset = 0;
1445 } else {
1446 get_page(pagep);
1447 page_info->page_offset = page_offset + rx_frag_size;
1448 }
1449 page_offset = page_info->page_offset;
1450 page_info->page = pagep;
fac6da5b 1451 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1452 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1453
1454 rxd = queue_head_node(rxq);
1455 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1456 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1457
1458 /* Any space left in the current big page for another frag? */
1459 if ((page_offset + rx_frag_size + rx_frag_size) >
1460 adapter->big_page_size) {
1461 pagep = NULL;
1462 page_info->last_page_user = true;
1463 }
26d92f92
SP
1464
1465 prev_page_info = page_info;
1466 queue_head_inc(rxq);
10ef9ab4 1467 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1468 }
1469 if (pagep)
26d92f92 1470 prev_page_info->last_page_user = true;
6b7c5b94
SP
1471
1472 if (posted) {
6b7c5b94 1473 atomic_add(posted, &rxq->used);
8788fdc2 1474 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1475 } else if (atomic_read(&rxq->used) == 0) {
1476 /* Let be_worker replenish when memory is available */
3abcdeda 1477 rxo->rx_post_starved = true;
6b7c5b94 1478 }
6b7c5b94
SP
1479}
1480
5fb379ee 1481static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1482{
6b7c5b94
SP
1483 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1484
1485 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1486 return NULL;
1487
f3eb62d2 1488 rmb();
6b7c5b94
SP
1489 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1490
1491 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1492
1493 queue_tail_inc(tx_cq);
1494 return txcp;
1495}
1496
3c8def97
SP
1497static u16 be_tx_compl_process(struct be_adapter *adapter,
1498 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1499{
3c8def97 1500 struct be_queue_info *txq = &txo->q;
a73b796e 1501 struct be_eth_wrb *wrb;
3c8def97 1502 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1503 struct sk_buff *sent_skb;
ec43b1a6
SP
1504 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1505 bool unmap_skb_hdr = true;
6b7c5b94 1506
ec43b1a6 1507 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1508 BUG_ON(!sent_skb);
ec43b1a6
SP
1509 sent_skbs[txq->tail] = NULL;
1510
1511 /* skip header wrb */
a73b796e 1512 queue_tail_inc(txq);
6b7c5b94 1513
ec43b1a6 1514 do {
6b7c5b94 1515 cur_index = txq->tail;
a73b796e 1516 wrb = queue_tail_node(txq);
2b7bcebf
IV
1517 unmap_tx_frag(&adapter->pdev->dev, wrb,
1518 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1519 unmap_skb_hdr = false;
1520
6b7c5b94
SP
1521 num_wrbs++;
1522 queue_tail_inc(txq);
ec43b1a6 1523 } while (cur_index != last_index);
6b7c5b94 1524
6b7c5b94 1525 kfree_skb(sent_skb);
4d586b82 1526 return num_wrbs;
6b7c5b94
SP
1527}
1528
10ef9ab4
SP
1529/* Return the number of events in the event queue */
1530static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1531{
10ef9ab4
SP
1532 struct be_eq_entry *eqe;
1533 int num = 0;
859b1e4e 1534
10ef9ab4
SP
1535 do {
1536 eqe = queue_tail_node(&eqo->q);
1537 if (eqe->evt == 0)
1538 break;
859b1e4e 1539
10ef9ab4
SP
1540 rmb();
1541 eqe->evt = 0;
1542 num++;
1543 queue_tail_inc(&eqo->q);
1544 } while (true);
1545
1546 return num;
859b1e4e
SP
1547}
1548
10ef9ab4 1549static int event_handle(struct be_eq_obj *eqo)
859b1e4e 1550{
10ef9ab4
SP
1551 bool rearm = false;
1552 int num = events_get(eqo);
859b1e4e 1553
10ef9ab4 1554 /* Deal with any spurious interrupts that come without events */
3c8def97
SP
1555 if (!num)
1556 rearm = true;
1557
10ef9ab4 1558 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
859b1e4e 1559 if (num)
10ef9ab4 1560 napi_schedule(&eqo->napi);
859b1e4e
SP
1561
1562 return num;
1563}
1564
10ef9ab4
SP
1565/* Leaves the EQ is disarmed state */
1566static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1567{
10ef9ab4 1568 int num = events_get(eqo);
859b1e4e 1569
10ef9ab4 1570 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1571}
1572
10ef9ab4 1573static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1574{
1575 struct be_rx_page_info *page_info;
3abcdeda
SP
1576 struct be_queue_info *rxq = &rxo->q;
1577 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1578 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1579 u16 tail;
1580
1581 /* First cleanup pending rx completions */
3abcdeda 1582 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
10ef9ab4
SP
1583 be_rx_compl_discard(rxo, rxcp);
1584 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1585 }
1586
1587 /* Then free posted rx buffer that were not used */
1588 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1589 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1590 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1591 put_page(page_info->page);
1592 memset(page_info, 0, sizeof(*page_info));
1593 }
1594 BUG_ON(atomic_read(&rxq->used));
482c9e79 1595 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1596}
1597
0ae57bb3 1598static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1599{
0ae57bb3
SP
1600 struct be_tx_obj *txo;
1601 struct be_queue_info *txq;
a8e9179a 1602 struct be_eth_tx_compl *txcp;
4d586b82 1603 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1604 struct sk_buff *sent_skb;
1605 bool dummy_wrb;
0ae57bb3 1606 int i, pending_txqs;
a8e9179a
SP
1607
1608 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1609 do {
0ae57bb3
SP
1610 pending_txqs = adapter->num_tx_qs;
1611
1612 for_all_tx_queues(adapter, txo, i) {
1613 txq = &txo->q;
1614 while ((txcp = be_tx_compl_get(&txo->cq))) {
1615 end_idx =
1616 AMAP_GET_BITS(struct amap_eth_tx_compl,
1617 wrb_index, txcp);
1618 num_wrbs += be_tx_compl_process(adapter, txo,
1619 end_idx);
1620 cmpl++;
1621 }
1622 if (cmpl) {
1623 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1624 atomic_sub(num_wrbs, &txq->used);
1625 cmpl = 0;
1626 num_wrbs = 0;
1627 }
1628 if (atomic_read(&txq->used) == 0)
1629 pending_txqs--;
a8e9179a
SP
1630 }
1631
0ae57bb3 1632 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1633 break;
1634
1635 mdelay(1);
1636 } while (true);
1637
0ae57bb3
SP
1638 for_all_tx_queues(adapter, txo, i) {
1639 txq = &txo->q;
1640 if (atomic_read(&txq->used))
1641 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1642 atomic_read(&txq->used));
1643
1644 /* free posted tx for which compls will never arrive */
1645 while (atomic_read(&txq->used)) {
1646 sent_skb = txo->sent_skb_list[txq->tail];
1647 end_idx = txq->tail;
1648 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1649 &dummy_wrb);
1650 index_adv(&end_idx, num_wrbs - 1, txq->len);
1651 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1652 atomic_sub(num_wrbs, &txq->used);
1653 }
b03388d6 1654 }
6b7c5b94
SP
1655}
1656
10ef9ab4
SP
1657static void be_evt_queues_destroy(struct be_adapter *adapter)
1658{
1659 struct be_eq_obj *eqo;
1660 int i;
1661
1662 for_all_evt_queues(adapter, eqo, i) {
1663 be_eq_clean(eqo);
1664 if (eqo->q.created)
1665 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1666 be_queue_free(adapter, &eqo->q);
1667 }
1668}
1669
1670static int be_evt_queues_create(struct be_adapter *adapter)
1671{
1672 struct be_queue_info *eq;
1673 struct be_eq_obj *eqo;
1674 int i, rc;
1675
1676 adapter->num_evt_qs = num_irqs(adapter);
1677
1678 for_all_evt_queues(adapter, eqo, i) {
1679 eqo->adapter = adapter;
1680 eqo->tx_budget = BE_TX_BUDGET;
1681 eqo->idx = i;
1682 eqo->max_eqd = BE_MAX_EQD;
1683 eqo->enable_aic = true;
1684
1685 eq = &eqo->q;
1686 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1687 sizeof(struct be_eq_entry));
1688 if (rc)
1689 return rc;
1690
1691 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1692 if (rc)
1693 return rc;
1694 }
1cfafab9 1695 return 0;
10ef9ab4
SP
1696}
1697
5fb379ee
SP
1698static void be_mcc_queues_destroy(struct be_adapter *adapter)
1699{
1700 struct be_queue_info *q;
5fb379ee 1701
8788fdc2 1702 q = &adapter->mcc_obj.q;
5fb379ee 1703 if (q->created)
8788fdc2 1704 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1705 be_queue_free(adapter, q);
1706
8788fdc2 1707 q = &adapter->mcc_obj.cq;
5fb379ee 1708 if (q->created)
8788fdc2 1709 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1710 be_queue_free(adapter, q);
1711}
1712
1713/* Must be called only after TX qs are created as MCC shares TX EQ */
1714static int be_mcc_queues_create(struct be_adapter *adapter)
1715{
1716 struct be_queue_info *q, *cq;
5fb379ee 1717
8788fdc2 1718 cq = &adapter->mcc_obj.cq;
5fb379ee 1719 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1720 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1721 goto err;
1722
10ef9ab4
SP
1723 /* Use the default EQ for MCC completions */
1724 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1725 goto mcc_cq_free;
1726
8788fdc2 1727 q = &adapter->mcc_obj.q;
5fb379ee
SP
1728 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1729 goto mcc_cq_destroy;
1730
8788fdc2 1731 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1732 goto mcc_q_free;
1733
1734 return 0;
1735
1736mcc_q_free:
1737 be_queue_free(adapter, q);
1738mcc_cq_destroy:
8788fdc2 1739 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1740mcc_cq_free:
1741 be_queue_free(adapter, cq);
1742err:
1743 return -1;
1744}
1745
6b7c5b94
SP
1746static void be_tx_queues_destroy(struct be_adapter *adapter)
1747{
1748 struct be_queue_info *q;
3c8def97
SP
1749 struct be_tx_obj *txo;
1750 u8 i;
6b7c5b94 1751
3c8def97
SP
1752 for_all_tx_queues(adapter, txo, i) {
1753 q = &txo->q;
1754 if (q->created)
1755 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1756 be_queue_free(adapter, q);
6b7c5b94 1757
3c8def97
SP
1758 q = &txo->cq;
1759 if (q->created)
1760 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1761 be_queue_free(adapter, q);
1762 }
6b7c5b94
SP
1763}
1764
dafc0fe3
SP
1765static int be_num_txqs_want(struct be_adapter *adapter)
1766{
11ac75ed 1767 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
dafc0fe3
SP
1768 lancer_chip(adapter) || !be_physfn(adapter) ||
1769 adapter->generation == BE_GEN2)
1770 return 1;
1771 else
1772 return MAX_TX_QS;
1773}
1774
10ef9ab4 1775static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1776{
10ef9ab4
SP
1777 struct be_queue_info *cq, *eq;
1778 int status;
3c8def97
SP
1779 struct be_tx_obj *txo;
1780 u8 i;
6b7c5b94 1781
dafc0fe3 1782 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1783 if (adapter->num_tx_qs != MAX_TX_QS) {
1784 rtnl_lock();
dafc0fe3
SP
1785 netif_set_real_num_tx_queues(adapter->netdev,
1786 adapter->num_tx_qs);
3bb62f4f
PR
1787 rtnl_unlock();
1788 }
dafc0fe3 1789
10ef9ab4
SP
1790 for_all_tx_queues(adapter, txo, i) {
1791 cq = &txo->cq;
1792 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1793 sizeof(struct be_eth_tx_compl));
1794 if (status)
1795 return status;
3c8def97 1796
10ef9ab4
SP
1797 /* If num_evt_qs is less than num_tx_qs, then more than
1798 * one txq share an eq
1799 */
1800 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1801 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1802 if (status)
1803 return status;
1804 }
1805 return 0;
1806}
6b7c5b94 1807
10ef9ab4
SP
1808static int be_tx_qs_create(struct be_adapter *adapter)
1809{
1810 struct be_tx_obj *txo;
1811 int i, status;
fe6d2a38 1812
3c8def97 1813 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1814 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1815 sizeof(struct be_eth_wrb));
1816 if (status)
1817 return status;
6b7c5b94 1818
10ef9ab4
SP
1819 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1820 if (status)
1821 return status;
3c8def97 1822 }
6b7c5b94 1823
10ef9ab4 1824 return 0;
6b7c5b94
SP
1825}
1826
10ef9ab4 1827static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1828{
1829 struct be_queue_info *q;
3abcdeda
SP
1830 struct be_rx_obj *rxo;
1831 int i;
1832
1833 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1834 q = &rxo->cq;
1835 if (q->created)
1836 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1837 be_queue_free(adapter, q);
ac6a0c4a
SP
1838 }
1839}
1840
/* Decide the RX queue count and create one completion queue per RX
 * ring. CQs may share EQs when there are more RX queues than EQs.
 * Returns 0 on success or the status of the first failing step.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* round-robin CQs onto the available event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1874
6b7c5b94
SP
1875static irqreturn_t be_intx(int irq, void *dev)
1876{
1877 struct be_adapter *adapter = dev;
10ef9ab4 1878 int num_evts;
6b7c5b94 1879
10ef9ab4
SP
1880 /* With INTx only one EQ is used */
1881 num_evts = event_handle(&adapter->eq_obj[0]);
1882 if (num_evts)
1883 return IRQ_HANDLED;
1884 else
1885 return IRQ_NONE;
6b7c5b94
SP
1886}
1887
10ef9ab4 1888static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 1889{
10ef9ab4 1890 struct be_eq_obj *eqo = dev;
6b7c5b94 1891
10ef9ab4 1892 event_handle(eqo);
6b7c5b94
SP
1893 return IRQ_HANDLED;
1894}
1895
2e588f84 1896static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1897{
2e588f84 1898 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1899}
1900
10ef9ab4
SP
/* NAPI receive processing for one RX ring: consume up to @budget
 * completions, hand good packets to GRO or the regular receive path,
 * and replenish RX buffers when the ring runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring once it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1950
10ef9ab4
SP
/* Reap up to @budget TX completions on @txo, return the freed WRBs to
 * the ring and re-wake the netdev subqueue if it was flow-stopped.
 * Returns true when fewer than @budget completions were found, i.e.
 * TX work is done for this poll cycle.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* stats update under the u64_stats sync for 32-bit safety */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 1983
10ef9ab4
SP
/* NAPI poll handler: services all TX and RX queues mapped to this EQ,
 * plus MCC completions on the MCC EQ. Returns the RX work consumed,
 * or @budget (stay in polling mode) when TX work remains.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2020
/* Detect an Unrecoverable Error (UE) on the adapter and dump diagnostic
 * state. Lancer chips are checked via the SLIPORT status registers; BE
 * chips via the UE status words in PCI config space (masked bits are
 * ignored). On error, ue_detected/eeh_err are latched and each failing
 * bit or SLIPORT register is logged. Runs at most once per error.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* already latched an error earlier -- nothing more to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked bits are not treated as errors */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2084
8d56ff11
SP
2085static void be_msix_disable(struct be_adapter *adapter)
2086{
ac6a0c4a 2087 if (msix_enabled(adapter)) {
8d56ff11 2088 pci_disable_msix(adapter->pdev);
ac6a0c4a 2089 adapter->num_msix_vec = 0;
3abcdeda
SP
2090 }
2091}
2092
10ef9ab4
SP
2093static uint be_num_rss_want(struct be_adapter *adapter)
2094{
2095 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2096 adapter->num_vfs == 0 && be_physfn(adapter) &&
2097 !be_is_mc(adapter))
2098 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2099 else
2100 return 0;
2101}
2102
6b7c5b94
SP
/* Try to enable MSI-X with one vector per desired RSS queue (capped at
 * the number of online CPUs). If the full request fails but the PCI
 * core reports a smaller available count, retry with that count.
 * On success records the vector count in adapter->num_msix_vec;
 * on failure leaves it at 0 (caller falls back to INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors available;
		 * retry the allocation with that smaller count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2129
/* Enable SR-IOV when requested via the num_vfs module parameter,
 * capping the request at the device's TotalVFs value, and allocate
 * the per-VF config array sized to the VFs actually enabled.
 * Returns 0 on success (or when SR-IOV is not requested/compiled in),
 * -ENOMEM if the per-VF array cannot be allocated.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* size the array to the enabled VF count; the old
			 * code used the raw num_vfs module parameter, which
			 * may exceed adapter->num_vfs and over-allocate */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2165
/* Counterpart of be_sriov_enable(): disable SR-IOV and free the per-VF
 * config array. No-op when SR-IOV was not enabled or CONFIG_PCI_IOV is
 * not built in.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2176
fe6d2a38 2177static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2178 struct be_eq_obj *eqo)
b628bde2 2179{
10ef9ab4 2180 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2181}
6b7c5b94 2182
b628bde2
SP
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, unwinds the IRQs registered so far and disables MSI-X.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* free the IRQs acquired before the failing one, in reverse */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2206
/* Register interrupt handlers: MSI-X when enabled, otherwise a shared
 * INTx line. VFs support only MSI-X, so no INTx fallback is attempted
 * for them. Sets adapter->isr_registered on success.
 * Returns 0 on success or the request error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2234
2235static void be_irq_unregister(struct be_adapter *adapter)
2236{
2237 struct net_device *netdev = adapter->netdev;
10ef9ab4 2238 struct be_eq_obj *eqo;
3abcdeda 2239 int i;
6b7c5b94
SP
2240
2241 if (!adapter->isr_registered)
2242 return;
2243
2244 /* INTx */
ac6a0c4a 2245 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2246 free_irq(netdev->irq, adapter);
2247 goto done;
2248 }
2249
2250 /* MSIx */
10ef9ab4
SP
2251 for_all_evt_queues(adapter, eqo, i)
2252 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2253
6b7c5b94
SP
2254done:
2255 adapter->isr_registered = false;
6b7c5b94
SP
2256}
2257
/* Destroy the RX WRB rings: for each created queue, ask the FW to
 * destroy it, wait for in-flight DMA and the flush completion, drain
 * the CQ, then free the host memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2278
889cd4b2
SP
/* ndo_stop handler: quiesce the device. Disables async MCC and (on
 * non-Lancer chips) the interrupt bit, stops NAPI and syncs IRQs per
 * EQ, unregisters IRQ handlers, drains pending TX completions so all
 * skbs are freed, and tears down the RX rings. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2309
10ef9ab4 2310static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2311{
2312 struct be_rx_obj *rxo;
e9008ee9
PR
2313 int rc, i, j;
2314 u8 rsstable[128];
482c9e79
SP
2315
2316 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2317 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2318 sizeof(struct be_eth_rx_d));
2319 if (rc)
2320 return rc;
2321 }
2322
2323 /* The FW would like the default RXQ to be created first */
2324 rxo = default_rxo(adapter);
2325 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2326 adapter->if_handle, false, &rxo->rss_id);
2327 if (rc)
2328 return rc;
2329
2330 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2331 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2332 rx_frag_size, adapter->if_handle,
2333 true, &rxo->rss_id);
482c9e79
SP
2334 if (rc)
2335 return rc;
2336 }
2337
2338 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2339 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2340 for_all_rss_queues(adapter, rxo, i) {
2341 if ((j + i) >= 128)
2342 break;
2343 rsstable[j + i] = rxo->rss_id;
2344 }
2345 }
2346 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2347 if (rc)
2348 return rc;
2349 }
2350
2351 /* First time posting */
10ef9ab4 2352 for_all_rx_queues(adapter, rxo, i)
482c9e79 2353 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2354 return 0;
2355}
2356
6b7c5b94
SP
/* ndo_open handler: create the RX rings, register IRQs, enable the
 * interrupt bit (non-Lancer), arm all RX/TX CQs and EQs, enable async
 * MCC and NAPI, then report the current link state.
 * Returns 0 on success or -EIO after rolling back via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2398
71d8d1b5
AK
2399static int be_setup_wol(struct be_adapter *adapter, bool enable)
2400{
2401 struct be_dma_mem cmd;
2402 int status = 0;
2403 u8 mac[ETH_ALEN];
2404
2405 memset(mac, 0, ETH_ALEN);
2406
2407 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2408 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2409 GFP_KERNEL);
71d8d1b5
AK
2410 if (cmd.va == NULL)
2411 return -1;
2412 memset(cmd.va, 0, cmd.size);
2413
2414 if (enable) {
2415 status = pci_write_config_dword(adapter->pdev,
2416 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2417 if (status) {
2418 dev_err(&adapter->pdev->dev,
2381a55c 2419 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2420 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2421 cmd.dma);
71d8d1b5
AK
2422 return status;
2423 }
2424 status = be_cmd_enable_magic_wol(adapter,
2425 adapter->netdev->dev_addr, &cmd);
2426 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2427 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2428 } else {
2429 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2430 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2431 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2432 }
2433
2b7bcebf 2434 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2435 return status;
2436}
2437
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns 0 on success or the last failing command status.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list interface; BEx programs a pmac */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2472
/* Undo be_vf_setup(): remove each VF's programmed MAC address and
 * destroy its FW interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears via the mac-list; BEx deletes the pmac */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2488
a54769f5
SP
/* Tear down everything created by be_setup(): the worker, VF state,
 * extra unicast MACs, the FW interface, all queues, FW command state
 * and MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extras start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete any programmed extra unicast MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2519
30128031
SP
2520static void be_vf_setup_init(struct be_adapter *adapter)
2521{
11ac75ed 2522 struct be_vf_cfg *vf_cfg;
30128031
SP
2523 int vf;
2524
11ac75ed
SP
2525 for_all_vfs(adapter, vf_cfg, vf) {
2526 vf_cfg->if_handle = -1;
2527 vf_cfg->pmac_id = -1;
30128031
SP
2528 }
2529}
2530
f9449ab7
SP
/* Create a FW interface for each VF, program the VF MAC addresses and
 * record each VF's link speed (as tx_rate) and default VLAN.
 * Returns 0 on success or the first failing status; no rollback here
 * (the caller cleans up via be_clear()/be_vf_clear()).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		/* lnk_speed is in units of 10 Mbps per the query cmd */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2570
30128031
SP
/* Reset the adapter's soft-state fields to their "unset" defaults
 * before be_setup() runs.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;	/* -1 == no FW interface yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}
2580
/* Obtain the function's MAC via the FW mac-list interface (Lancer
 * path). If the FW reports an active pmac_id, query and reuse that
 * address; otherwise program @mac as a new pmac. On success the
 * resulting pmac_id is stored in adapter->pmac_id[0].
 * Returns 0 on success or the failing command status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
			&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2606
5fb379ee
SP
2607static int be_setup(struct be_adapter *adapter)
2608{
5fb379ee 2609 struct net_device *netdev = adapter->netdev;
f9449ab7 2610 u32 cap_flags, en_flags;
a54769f5 2611 u32 tx_fc, rx_fc;
10ef9ab4 2612 int status;
ba343c77
SB
2613 u8 mac[ETH_ALEN];
2614
30128031 2615 be_setup_init(adapter);
6b7c5b94 2616
f9449ab7 2617 be_cmd_req_native_mode(adapter);
73d540f2 2618
10ef9ab4
SP
2619 be_msix_enable(adapter);
2620
2621 status = be_evt_queues_create(adapter);
2622 if (status)
a54769f5 2623 goto err;
6b7c5b94 2624
10ef9ab4
SP
2625 status = be_tx_cqs_create(adapter);
2626 if (status)
2627 goto err;
2628
2629 status = be_rx_cqs_create(adapter);
2630 if (status)
a54769f5 2631 goto err;
6b7c5b94 2632
f9449ab7 2633 status = be_mcc_queues_create(adapter);
10ef9ab4 2634 if (status)
a54769f5 2635 goto err;
6b7c5b94 2636
f9449ab7
SP
2637 memset(mac, 0, ETH_ALEN);
2638 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2639 true /*permanent */, 0, 0);
f9449ab7
SP
2640 if (status)
2641 return status;
2642 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2643 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2644
f9449ab7
SP
2645 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2646 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2647 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
5d5adb93
PR
2648 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2649
f9449ab7
SP
2650 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2651 cap_flags |= BE_IF_FLAGS_RSS;
2652 en_flags |= BE_IF_FLAGS_RSS;
2653 }
2654 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2655 netdev->dev_addr, &adapter->if_handle,
fbc13f01 2656 &adapter->pmac_id[0], 0);
5fb379ee 2657 if (status != 0)
a54769f5 2658 goto err;
6b7c5b94 2659
590c391d
PR
2660 /* The VF's permanent mac queried from card is incorrect.
2661 * For BEx: Query the mac configued by the PF using if_handle
2662 * For Lancer: Get and use mac_list to obtain mac address.
2663 */
2664 if (!be_physfn(adapter)) {
2665 if (lancer_chip(adapter))
e5e1ee89 2666 status = be_add_mac_from_list(adapter, mac);
590c391d
PR
2667 else
2668 status = be_cmd_mac_addr_query(adapter, mac,
2669 MAC_ADDRESS_TYPE_NETWORK, false,
2670 adapter->if_handle, 0);
f9449ab7
SP
2671 if (!status) {
2672 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2673 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2674 }
2675 }
0dffc83e 2676
10ef9ab4
SP
2677 status = be_tx_qs_create(adapter);
2678 if (status)
2679 goto err;
2680
04b71175 2681 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2682
a54769f5
SP
2683 status = be_vid_config(adapter, false, 0);
2684 if (status)
2685 goto err;
7ab8b0b4 2686
a54769f5 2687 be_set_rx_mode(adapter->netdev);
5fb379ee 2688
a54769f5 2689 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d
PR
2690 /* For Lancer: It is legal for this cmd to fail on VF */
2691 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2692 goto err;
590c391d 2693
a54769f5
SP
2694 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2695 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2696 adapter->rx_fc);
590c391d
PR
2697 /* For Lancer: It is legal for this cmd to fail on VF */
2698 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5
SP
2699 goto err;
2700 }
2dc1deb6 2701
a54769f5 2702 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2703
11ac75ed 2704 if (sriov_enabled(adapter)) {
f9449ab7
SP
2705 status = be_vf_setup(adapter);
2706 if (status)
2707 goto err;
2708 }
2709
191eb756
SP
2710 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2711 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2712
f9449ab7 2713 return 0;
a54769f5
SP
2714err:
2715 be_clear(adapter);
2716 return status;
2717}
6b7c5b94 2718
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every event queue exactly as the IRQ
 * handlers would.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx)
		event_handle(eqo);
}
#endif
2732
84517482 2733#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Decide whether the redboot image needs flashing: compare the CRC
 * currently stored in flash against the 4-byte CRC trailer of the new
 * image at @p. Returns true only when the CRCs differ; returns false
 * (skip flashing) when they match or the flash CRC cannot be read.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* the CRC occupies the last 4 bytes of the image */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2760
306f1348
SP
2761static bool phy_flashing_required(struct be_adapter *adapter)
2762{
2763 int status = 0;
2764 struct be_phy_info phy_info;
2765
2766 status = be_cmd_get_phy_info(adapter, &phy_info);
2767 if (status)
2768 return false;
2769 if ((phy_info.phy_type == TN_8022) &&
2770 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2771 return true;
2772 }
2773 return false;
2774}
2775
3f0d4560 2776static int be_flash_data(struct be_adapter *adapter,
84517482 2777 const struct firmware *fw,
3f0d4560
AK
2778 struct be_dma_mem *flash_cmd, int num_of_images)
2779
84517482 2780{
3f0d4560
AK
2781 int status = 0, i, filehdr_size = 0;
2782 u32 total_bytes = 0, flash_op;
84517482
AK
2783 int num_bytes;
2784 const u8 *p = fw->data;
2785 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2786 const struct flash_comp *pflashcomp;
9fe96934 2787 int num_comp;
3f0d4560 2788
306f1348 2789 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2790 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2791 FLASH_IMAGE_MAX_SIZE_g3},
2792 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2793 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2794 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2795 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2796 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2797 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2798 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2799 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2800 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2801 FLASH_IMAGE_MAX_SIZE_g3},
2802 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2803 FLASH_IMAGE_MAX_SIZE_g3},
2804 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2805 FLASH_IMAGE_MAX_SIZE_g3},
2806 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2807 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2808 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2809 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2810 };
215faf9c 2811 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2812 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2813 FLASH_IMAGE_MAX_SIZE_g2},
2814 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2815 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2816 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2817 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2818 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2819 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2820 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2821 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2822 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2823 FLASH_IMAGE_MAX_SIZE_g2},
2824 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2825 FLASH_IMAGE_MAX_SIZE_g2},
2826 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2827 FLASH_IMAGE_MAX_SIZE_g2}
2828 };
2829
2830 if (adapter->generation == BE_GEN3) {
2831 pflashcomp = gen3_flash_types;
2832 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2833 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2834 } else {
2835 pflashcomp = gen2_flash_types;
2836 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2837 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2838 }
9fe96934
SB
2839 for (i = 0; i < num_comp; i++) {
2840 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2841 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2842 continue;
306f1348
SP
2843 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2844 if (!phy_flashing_required(adapter))
2845 continue;
2846 }
3f0d4560
AK
2847 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2848 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2849 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2850 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2851 continue;
2852 p = fw->data;
2853 p += filehdr_size + pflashcomp[i].offset
2854 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2855 if (p + pflashcomp[i].size > fw->data + fw->size)
2856 return -1;
2857 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2858 while (total_bytes) {
2859 if (total_bytes > 32*1024)
2860 num_bytes = 32*1024;
2861 else
2862 num_bytes = total_bytes;
2863 total_bytes -= num_bytes;
306f1348
SP
2864 if (!total_bytes) {
2865 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2866 flash_op = FLASHROM_OPER_PHY_FLASH;
2867 else
2868 flash_op = FLASHROM_OPER_FLASH;
2869 } else {
2870 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2871 flash_op = FLASHROM_OPER_PHY_SAVE;
2872 else
2873 flash_op = FLASHROM_OPER_SAVE;
2874 }
3f0d4560
AK
2875 memcpy(req->params.data_buf, p, num_bytes);
2876 p += num_bytes;
2877 status = be_cmd_write_flashrom(adapter, flash_cmd,
2878 pflashcomp[i].optype, flash_op, num_bytes);
2879 if (status) {
306f1348
SP
2880 if ((status == ILLEGAL_IOCTL_REQ) &&
2881 (pflashcomp[i].optype ==
2882 IMG_TYPE_PHY_FW))
2883 break;
3f0d4560
AK
2884 dev_err(&adapter->pdev->dev,
2885 "cmd to write to flash rom failed.\n");
2886 return -1;
2887 }
84517482 2888 }
84517482 2889 }
84517482
AK
2890 return 0;
2891}
2892
3f0d4560
AK
2893static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2894{
2895 if (fhdr == NULL)
2896 return 0;
2897 if (fhdr->build[0] == '3')
2898 return BE_GEN3;
2899 else if (fhdr->build[0] == '2')
2900 return BE_GEN2;
2901 else
2902 return 0;
2903}
2904
485bf569
SN
/* Flash a firmware image to a Lancer (SLI) adapter.
 * The image is streamed in LANCER_FW_DOWNLOAD_CHUNK-sized pieces via the
 * WRITE_OBJECT mailbox command, then committed with a zero-length write.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than the chunk we offered.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2983
/* Flash a UFI firmware image to a BE2/BE3 adapter.
 * Validates that the UFI file's generation matches the adapter generation,
 * then flashes each component via be_flash_data().  For GEN3 UFIs the file
 * carries multiple image headers; only images with imageid == 1 are flashed.
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on mismatch/error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the flashrom command plus a 32KB data window */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3039
3040int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3041{
3042 const struct firmware *fw;
3043 int status;
3044
3045 if (!netif_running(adapter->netdev)) {
3046 dev_err(&adapter->pdev->dev,
3047 "Firmware load not allowed (interface is down)\n");
3048 return -1;
3049 }
3050
3051 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3052 if (status)
3053 goto fw_exit;
3054
3055 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3056
3057 if (lancer_chip(adapter))
3058 status = lancer_fw_download(adapter, fw);
3059 else
3060 status = be_fw_download(adapter, fw);
3061
84517482
AK
3062fw_exit:
3063 release_firmware(fw);
3064 return status;
3065}
3066
e5686ad8 3067static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3068 .ndo_open = be_open,
3069 .ndo_stop = be_close,
3070 .ndo_start_xmit = be_xmit,
a54769f5 3071 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3072 .ndo_set_mac_address = be_mac_addr_set,
3073 .ndo_change_mtu = be_change_mtu,
ab1594e9 3074 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3075 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3076 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3077 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3078 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3079 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3080 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3081 .ndo_get_vf_config = be_get_vf_config,
3082#ifdef CONFIG_NET_POLL_CONTROLLER
3083 .ndo_poll_controller = be_netpoll,
3084#endif
6b7c5b94
SP
3085};
3086
/* Initialize the net_device: advertise offload features, install the
 * netdev/ethtool ops and register one NAPI context per event queue.
 * Called from be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: scatter-gather, TSO, checksum offload,
	 * RX checksum and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable all hw_features by default; VLAN stripping/filtering are
	 * always on and not user-toggleable.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast address filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3118
3119static void be_unmap_pci_bars(struct be_adapter *adapter)
3120{
8788fdc2
SP
3121 if (adapter->csr)
3122 iounmap(adapter->csr);
3123 if (adapter->db)
3124 iounmap(adapter->db);
6b7c5b94
SP
3125}
3126
3127static int be_map_pci_bars(struct be_adapter *adapter)
3128{
3129 u8 __iomem *addr;
db3ea781 3130 int db_reg;
6b7c5b94 3131
fe6d2a38
SP
3132 if (lancer_chip(adapter)) {
3133 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3134 pci_resource_len(adapter->pdev, 0));
3135 if (addr == NULL)
3136 return -ENOMEM;
3137 adapter->db = addr;
3138 return 0;
3139 }
3140
ba343c77
SB
3141 if (be_physfn(adapter)) {
3142 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3143 pci_resource_len(adapter->pdev, 2));
3144 if (addr == NULL)
3145 return -ENOMEM;
3146 adapter->csr = addr;
3147 }
6b7c5b94 3148
ba343c77 3149 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3150 db_reg = 4;
3151 } else {
ba343c77
SB
3152 if (be_physfn(adapter))
3153 db_reg = 4;
3154 else
3155 db_reg = 0;
3156 }
3157 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3158 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3159 if (addr == NULL)
3160 goto pci_map_err;
ba343c77
SB
3161 adapter->db = addr;
3162
6b7c5b94
SP
3163 return 0;
3164pci_map_err:
3165 be_unmap_pci_bars(adapter);
3166 return -ENOMEM;
3167}
3168
3169
3170static void be_ctrl_cleanup(struct be_adapter *adapter)
3171{
8788fdc2 3172 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3173
3174 be_unmap_pci_bars(adapter);
3175
3176 if (mem->va)
2b7bcebf
IV
3177 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3178 mem->dma);
e7b909a6 3179
5b8821b7 3180 mem = &adapter->rx_filter;
e7b909a6 3181 if (mem->va)
2b7bcebf
IV
3182 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3183 mem->dma);
6b7c5b94
SP
3184}
3185
6b7c5b94
SP
/* Set up the control path: map PCI BARs, allocate the MCC mailbox DMA
 * buffer (16-byte aligned, as the HW requires) and the rx-filter command
 * buffer, and initialize the mailbox/MCC locks.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto ladder.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into the mbox_mem_alloced buffer */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3238
3239static void be_stats_cleanup(struct be_adapter *adapter)
3240{
3abcdeda 3241 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3242
3243 if (cmd->va)
2b7bcebf
IV
3244 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3245 cmd->va, cmd->dma);
6b7c5b94
SP
3246}
3247
3248static int be_stats_init(struct be_adapter *adapter)
3249{
3abcdeda 3250 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3251
005d5696 3252 if (adapter->generation == BE_GEN2) {
89a88ab8 3253 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3254 } else {
3255 if (lancer_chip(adapter))
3256 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3257 else
3258 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3259 }
2b7bcebf
IV
3260 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3261 GFP_KERNEL);
6b7c5b94
SP
3262 if (cmd->va == NULL)
3263 return -1;
d291b9af 3264 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3265 return 0;
3266}
3267
/* PCI remove callback: tear down in the reverse order of be_probe().
 * The netdev is unregistered first so no new I/O can start, then the
 * data path, stats and control path are cleaned up before the PCI
 * resources are released.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed partway */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3291
4762f6ce
AK
3292bool be_is_wol_supported(struct be_adapter *adapter)
3293{
3294 return ((adapter->wol_cap & BE_WOL_CAP) &&
3295 !be_is_wol_excluded(adapter)) ? true : false;
3296}
3297
2243e2e9 3298static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3299{
6b7c5b94
SP
3300 int status;
3301
3abcdeda
SP
3302 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3303 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3304 if (status)
3305 return status;
3306
752961a1 3307 if (adapter->function_mode & FLEX10_MODE)
456d9c96 3308 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
82903e4b
AK
3309 else
3310 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3311
fbc13f01
AK
3312 if (be_physfn(adapter))
3313 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3314 else
3315 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3316
3317 /* primary mac needs 1 pmac entry */
3318 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3319 sizeof(u32), GFP_KERNEL);
3320 if (!adapter->pmac_id)
3321 return -ENOMEM;
3322
9e1453c5
AK
3323 status = be_cmd_get_cntl_attributes(adapter);
3324 if (status)
3325 return status;
3326
4762f6ce
AK
3327 status = be_cmd_get_acpi_wol_cap(adapter);
3328 if (status) {
3329 /* in case of a failure to get wol capabillities
3330 * check the exclusion list to determine WOL capability */
3331 if (!be_is_wol_excluded(adapter))
3332 adapter->wol_cap |= BE_WOL_CAP;
3333 }
3334
3335 if (be_is_wol_supported(adapter))
3336 adapter->wol = true;
3337
2243e2e9 3338 return 0;
6b7c5b94
SP
3339}
3340
fe6d2a38
SP
/* Determine the controller generation (BE_GEN2/BE_GEN3) from the PCI
 * device ID.  For Lancer-class IDs (OC_DEVICE_ID3/4) the SLI_INTF
 * register is additionally validated and the SLI family recorded.
 * Returns 0 on success, -EINVAL if the SLI_INTF register is invalid.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* Only interface type 2 with a valid signature is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		/* unknown device: leave generation unset */
		adapter->generation = 0;
	}
	return 0;
}
3376
37eed1cb
PR
3377static int lancer_wait_ready(struct be_adapter *adapter)
3378{
d8110f62 3379#define SLIPORT_READY_TIMEOUT 30
37eed1cb
PR
3380 u32 sliport_status;
3381 int status = 0, i;
3382
3383 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3384 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3385 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3386 break;
3387
d8110f62 3388 msleep(1000);
37eed1cb
PR
3389 }
3390
3391 if (i == SLIPORT_READY_TIMEOUT)
3392 status = -1;
3393
3394 return status;
3395}
3396
/* Wait for the Lancer SLI port to become ready; if the port reports an
 * error with "reset needed" set, trigger a port reset via the control
 * register and wait again, verifying the error condition cleared.
 * Returns 0 when the port is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Initiate a port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without a recoverable-reset indication */
			status = -1;
		}
	}
	return status;
}
3424
d8110f62
PR
/* Called periodically from be_worker(): if the Lancer SLI port reports an
 * error state, attempt a full function-level recovery — reset the port,
 * tear down and re-create the data path, and re-open the interface if it
 * was running.  No-op while an EEH error or UE is already being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear any stale FW-timeout indication before re-setup */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3473
/* Periodic (1 Hz) housekeeping work item: run Lancer error recovery and
 * UE detection, fire the async stats command, replenish any starved RX
 * rings and adapt the EQ delay.  While the interface is down it only
 * reaps pending MCC completions.  Re-arms itself on every pass.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost RX buffers on rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3516
6b7c5b94
SP
/* PCI probe: bring up the device end-to-end — PCI enable/BAR claim,
 * netdev allocation, generation detection, DMA mask, SR-IOV, control
 * path, FW ready/init/reset handshake, stats, configuration, data-path
 * setup and finally netdev registration.  The error-unwind ladder at the
 * bottom releases resources in exact reverse order of acquisition.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer needs its SLI port brought to the ready state first */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3649
/* PM suspend: arm WOL if configured, detach and close the interface,
 * tear down the data path and put the PCI device into the target
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3671
/* PM resume: re-enable the PCI device, re-init FW command support,
 * rebuild the data path, re-open the interface if it was running and
 * disarm WOL.  Mirrors be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3705
82456b03
SP
3706/*
3707 * An FLR will stop BE from DMAing any data.
3708 */
3709static void be_shutdown(struct pci_dev *pdev)
3710{
3711 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3712
2d5d4154
AK
3713 if (!adapter)
3714 return;
82456b03 3715
0f4a6828 3716 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3717
2d5d4154 3718 netif_device_detach(adapter->netdev);
82456b03 3719
82456b03
SP
3720 if (adapter->wol)
3721 be_setup_wol(adapter, true);
3722
57841869
AK
3723 be_cmd_reset_function(adapter);
3724
82456b03 3725 pci_disable_device(pdev);
82456b03
SP
3726}
3727
cf588477
SP
/* EEH/AER error-detected callback: quiesce the device (detach, close,
 * tear down the data path) and tell the PCI core whether a slot reset
 * should be attempted or the device permanently disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag suppresses further HW access until be_eeh_reset() */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3754
/* EEH slot-reset callback: re-enable the PCI device, restore its saved
 * config space and verify the FW comes back via POST.  Returns
 * RECOVERED so be_eeh_resume() is invoked, or DISCONNECT on failure.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear all error flags so normal HW access can resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3780
/* EEH resume callback: after a successful slot reset, re-init FW
 * command support, rebuild the data path and re-open the interface if
 * it was running before the error.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3810
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3816
6b7c5b94
SP
/* PCI driver descriptor; registered from be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3827
3828static int __init be_init_module(void)
3829{
8e95a202
JP
3830 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3831 rx_frag_size != 2048) {
6b7c5b94
SP
3832 printk(KERN_WARNING DRV_NAME
3833 " : Module param rx_frag_size must be 2048/4096/8192."
3834 " Using 2048\n");
3835 rx_frag_size = 2048;
3836 }
6b7c5b94
SP
3837
3838 return pci_register_driver(&be_driver);
3839}
3840module_init(be_init_module);
3841
/* Module exit: unregister the PCI driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}