/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

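/*
 * The doorbell helpers below pack a ring id and a posted/popped count into
 * a 32-bit value and write it to the queue's doorbell register. The wmb()
 * in the RQ/TXQ paths orders the descriptor writes in coherent memory
 * before the doorbell write, so the device never sees a doorbell for
 * descriptors that are not yet visible.
 */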
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

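/*
 * MAC replacement is make-before-break: the new pmac is added (and the cmd
 * checked for failure) before the previous pmac_id is deleted, so the
 * interface is never left without a programmed unicast address.
 */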
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

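/*
 * The erx drop counter in HW is only 16 bits wide; the driver widens it to
 * 32 bits by detecting wrap. For example, if the accumulator holds
 * 0x0001FFF0 (low half 0xFFF0) and the next HW sample is 0x0010, the
 * sample is below the low half, so one wrap (65536) is added:
 * 0x00010000 + 0x0010 + 0x10000 = 0x00020010.
 */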
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

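/*
 * Per-queue byte/packet counters are read under the u64_stats seqcount:
 * the fetch_begin/fetch_retry loop re-reads the snapshot if the data path
 * (which bumps the counters under u64_stats_update_begin/end) raced with
 * it. On 64-bit hosts this sync is compiled away; on 32-bit hosts it
 * prevents torn reads of the 64-bit counters.
 */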
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

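/*
 * WRB accounting example: a purely linear skb consumes 1 (head) + 1 (hdr)
 * = 2 WRBs, already even; one frag makes it 3, so on BE2/BE3 a dummy WRB
 * is added to keep the per-packet count even (Lancer has no such
 * requirement).
 */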
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

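/*
 * make_tx_wrbs() maps the skb head with dma_map_single() and each frag
 * with skb_frag_dma_map(); on any mapping failure it rewinds txq->head to
 * map_head and walks the already-filled WRBs, unmapping each one. Only
 * the first WRB can be a single mapping, hence map_single is cleared
 * after the first iteration of the unwind loop.
 */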
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vtag[ntags++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vtag, ntags, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

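/*
 * Adaptive interrupt coalescing: roughly once a second the RX pkt rate is
 * sampled and the EQ delay scaled with it, e.g. 1,100,000 pkts/s yields
 * eqd = (1100000 / 110000) << 3 = 80, clamped to [min_eqd, max_eqd];
 * rates low enough to give eqd < 10 disable the delay entirely.
 */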
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

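/*
 * RX buffers are rx_frag_size (default 2048) slices of a larger DMA page;
 * with 4K pages two slices share one page, so when consecutive fragments
 * of a packet land in the same page they are coalesced into a single skb
 * frag slot and the extra page reference is dropped.
 */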
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* rss_hash must be parsed from the HW completion, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* rss_hash must be parsed from the HW completion, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

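/*
 * Each allocated page is carved into rx_frag_size fragments; get_page()
 * takes an extra reference for every fragment after the first, and
 * last_page_user marks the fragment whose consumption triggers the
 * dma_unmap_page() of the whole page in get_rx_page_info().
 */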
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

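/*
 * Completion queues are consumed by polling a valid bit that the HW sets
 * in each CQ entry; the rmb() keeps the payload reads from being
 * speculated ahead of the valid-bit check, and the entry is zeroed after
 * parsing so a stale entry cannot be re-consumed on the next ring wrap.
 */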
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

10ef9ab4
SP
1575/* Return the number of events in the event queue */
1576static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1577{
10ef9ab4
SP
1578 struct be_eq_entry *eqe;
1579 int num = 0;
859b1e4e 1580
10ef9ab4
SP
1581 do {
1582 eqe = queue_tail_node(&eqo->q);
1583 if (eqe->evt == 0)
1584 break;
859b1e4e 1585
10ef9ab4
SP
1586 rmb();
1587 eqe->evt = 0;
1588 num++;
1589 queue_tail_inc(&eqo->q);
1590 } while (true);
1591
1592 return num;
1593}
1594
10ef9ab4 1595static int event_handle(struct be_eq_obj *eqo)
859b1e4e 1596{
1597 bool rearm = false;
1598 int num = events_get(eqo);
859b1e4e 1599
10ef9ab4 1600 /* Deal with any spurious interrupts that come without events */
1601 if (!num)
1602 rearm = true;
1603
1604 if (num || msix_enabled(eqo->adapter))
1605 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606
859b1e4e 1607 if (num)
10ef9ab4 1608 napi_schedule(&eqo->napi);
1609
1610 return num;
1611}
1612
1613 /* Leaves the EQ in disarmed state */
1614static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1615{
10ef9ab4 1616 int num = events_get(eqo);
859b1e4e 1617
10ef9ab4 1618 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1619}
1620
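/* A worked example of the tail arithmetic used below: with rxq->len of
 * 256, head at 10 and 3 posted-but-unconsumed buffers, the oldest buffer
 * sits at (10 + 256 - 3) % 256 = 7, and the loop frees entries 7, 8, 9.
 */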
10ef9ab4 1621static void be_rx_cq_clean(struct be_rx_obj *rxo)
1622{
1623 struct be_rx_page_info *page_info;
1624 struct be_queue_info *rxq = &rxo->q;
1625 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1626 struct be_rx_compl_info *rxcp;
1627 u16 tail;
1628
1629 /* First cleanup pending rx completions */
3abcdeda 1630 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1631 be_rx_compl_discard(rxo, rxcp);
1632 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1633 }
1634
1635 /* Then free posted rx buffers that were not used */
1636 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1637 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1638 page_info = get_rx_page_info(rxo, tail);
1639 put_page(page_info->page);
1640 memset(page_info, 0, sizeof(*page_info));
1641 }
1642 BUG_ON(atomic_read(&rxq->used));
482c9e79 1643 rxq->tail = rxq->head = 0;
1644}
1645
0ae57bb3 1646static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1647{
1648 struct be_tx_obj *txo;
1649 struct be_queue_info *txq;
a8e9179a 1650 struct be_eth_tx_compl *txcp;
4d586b82 1651 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1652 struct sk_buff *sent_skb;
1653 bool dummy_wrb;
0ae57bb3 1654 int i, pending_txqs;
1655
1656 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1657 do {
1658 pending_txqs = adapter->num_tx_qs;
1659
1660 for_all_tx_queues(adapter, txo, i) {
1661 txq = &txo->q;
1662 while ((txcp = be_tx_compl_get(&txo->cq))) {
1663 end_idx =
1664 AMAP_GET_BITS(struct amap_eth_tx_compl,
1665 wrb_index, txcp);
1666 num_wrbs += be_tx_compl_process(adapter, txo,
1667 end_idx);
1668 cmpl++;
1669 }
1670 if (cmpl) {
1671 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1672 atomic_sub(num_wrbs, &txq->used);
1673 cmpl = 0;
1674 num_wrbs = 0;
1675 }
1676 if (atomic_read(&txq->used) == 0)
1677 pending_txqs--;
1678 }
1679
0ae57bb3 1680 if (pending_txqs == 0 || ++timeo > 200)
1681 break;
1682
1683 mdelay(1);
1684 } while (true);
1685
1686 for_all_tx_queues(adapter, txo, i) {
1687 txq = &txo->q;
1688 if (atomic_read(&txq->used))
1689 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1690 atomic_read(&txq->used));
1691
1692 /* free posted tx for which compls will never arrive */
1693 while (atomic_read(&txq->used)) {
1694 sent_skb = txo->sent_skb_list[txq->tail];
1695 end_idx = txq->tail;
1696 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1697 &dummy_wrb);
1698 index_adv(&end_idx, num_wrbs - 1, txq->len);
1699 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1700 atomic_sub(num_wrbs, &txq->used);
1701 }
b03388d6 1702 }
1703}
1704
1705static void be_evt_queues_destroy(struct be_adapter *adapter)
1706{
1707 struct be_eq_obj *eqo;
1708 int i;
1709
1710 for_all_evt_queues(adapter, eqo, i) {
1711 be_eq_clean(eqo);
1712 if (eqo->q.created)
1713 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1714 be_queue_free(adapter, &eqo->q);
1715 }
1716}
1717
1718static int be_evt_queues_create(struct be_adapter *adapter)
1719{
1720 struct be_queue_info *eq;
1721 struct be_eq_obj *eqo;
1722 int i, rc;
1723
1724 adapter->num_evt_qs = num_irqs(adapter);
1725
1726 for_all_evt_queues(adapter, eqo, i) {
1727 eqo->adapter = adapter;
1728 eqo->tx_budget = BE_TX_BUDGET;
1729 eqo->idx = i;
1730 eqo->max_eqd = BE_MAX_EQD;
1731 eqo->enable_aic = true;
1732
1733 eq = &eqo->q;
1734 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1735 sizeof(struct be_eq_entry));
1736 if (rc)
1737 return rc;
1738
1739 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1740 if (rc)
1741 return rc;
1742 }
1cfafab9 1743 return 0;
1744}
1745
1746static void be_mcc_queues_destroy(struct be_adapter *adapter)
1747{
1748 struct be_queue_info *q;
5fb379ee 1749
8788fdc2 1750 q = &adapter->mcc_obj.q;
5fb379ee 1751 if (q->created)
8788fdc2 1752 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1753 be_queue_free(adapter, q);
1754
8788fdc2 1755 q = &adapter->mcc_obj.cq;
5fb379ee 1756 if (q->created)
8788fdc2 1757 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1758 be_queue_free(adapter, q);
1759}
1760
1761/* Must be called only after TX qs are created as MCC shares TX EQ */
1762static int be_mcc_queues_create(struct be_adapter *adapter)
1763{
1764 struct be_queue_info *q, *cq;
5fb379ee 1765
8788fdc2 1766 cq = &adapter->mcc_obj.cq;
5fb379ee 1767 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1768 sizeof(struct be_mcc_compl)))
1769 goto err;
1770
1771 /* Use the default EQ for MCC completions */
1772 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1773 goto mcc_cq_free;
1774
8788fdc2 1775 q = &adapter->mcc_obj.q;
1776 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1777 goto mcc_cq_destroy;
1778
8788fdc2 1779 if (be_cmd_mccq_create(adapter, q, cq))
1780 goto mcc_q_free;
1781
1782 return 0;
1783
1784mcc_q_free:
1785 be_queue_free(adapter, q);
1786mcc_cq_destroy:
8788fdc2 1787 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1788mcc_cq_free:
1789 be_queue_free(adapter, cq);
1790err:
1791 return -1;
1792}
1793
1794static void be_tx_queues_destroy(struct be_adapter *adapter)
1795{
1796 struct be_queue_info *q;
1797 struct be_tx_obj *txo;
1798 u8 i;
6b7c5b94 1799
1800 for_all_tx_queues(adapter, txo, i) {
1801 q = &txo->q;
1802 if (q->created)
1803 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1804 be_queue_free(adapter, q);
6b7c5b94 1805
1806 q = &txo->cq;
1807 if (q->created)
1808 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1809 be_queue_free(adapter, q);
1810 }
1811}
1812
1813static int be_num_txqs_want(struct be_adapter *adapter)
1814{
1815 if (sriov_want(adapter) || be_is_mc(adapter) ||
1816 lancer_chip(adapter) || !be_physfn(adapter) ||
1817 adapter->generation == BE_GEN2)
1818 return 1;
1819 else
1820 return MAX_TX_QS;
1821}
1822
10ef9ab4 1823static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1824{
1825 struct be_queue_info *cq, *eq;
1826 int status;
1827 struct be_tx_obj *txo;
1828 u8 i;
6b7c5b94 1829
dafc0fe3 1830 adapter->num_tx_qs = be_num_txqs_want(adapter);
1831 if (adapter->num_tx_qs != MAX_TX_QS) {
1832 rtnl_lock();
1833 netif_set_real_num_tx_queues(adapter->netdev,
1834 adapter->num_tx_qs);
1835 rtnl_unlock();
1836 }
dafc0fe3 1837
1838 for_all_tx_queues(adapter, txo, i) {
1839 cq = &txo->cq;
1840 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1841 sizeof(struct be_eth_tx_compl));
1842 if (status)
1843 return status;
3c8def97 1844
1845 /* If num_evt_qs is less than num_tx_qs, then more than
1846 * one txq shares an eq
1847 */
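 /* e.g. with 8 tx queues on 4 event queues, txq0/txq4 map to eq0,
 * txq1/txq5 to eq1, and so on, via the modulo mapping below.
 */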
1848 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1849 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1850 if (status)
1851 return status;
1852 }
1853 return 0;
1854}
6b7c5b94 1855
1856static int be_tx_qs_create(struct be_adapter *adapter)
1857{
1858 struct be_tx_obj *txo;
1859 int i, status;
fe6d2a38 1860
3c8def97 1861 for_all_tx_queues(adapter, txo, i) {
1862 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1863 sizeof(struct be_eth_wrb));
1864 if (status)
1865 return status;
6b7c5b94 1866
1867 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1868 if (status)
1869 return status;
3c8def97 1870 }
6b7c5b94 1871
10ef9ab4 1872 return 0;
1873}
1874
10ef9ab4 1875static void be_rx_cqs_destroy(struct be_adapter *adapter)
1876{
1877 struct be_queue_info *q;
1878 struct be_rx_obj *rxo;
1879 int i;
1880
1881 for_all_rx_queues(adapter, rxo, i) {
1882 q = &rxo->cq;
1883 if (q->created)
1884 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1885 be_queue_free(adapter, q);
1886 }
1887}
1888
10ef9ab4 1889static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1890{
10ef9ab4 1891 struct be_queue_info *eq, *cq;
1892 struct be_rx_obj *rxo;
1893 int rc, i;
6b7c5b94 1894
1895 /* We'll create as many RSS rings as there are irqs.
1896 * But when there's only one irq there's no use creating RSS rings
1897 */
1898 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1899 num_irqs(adapter) + 1 : 1;
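 /* e.g. 4 irqs yield 5 RX queues (4 RSS rings plus the default ring),
 * while a single irq yields only the default RX queue.
 */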
ac6a0c4a 1900
6b7c5b94 1901 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1902 for_all_rx_queues(adapter, rxo, i) {
1903 rxo->adapter = adapter;
1904 cq = &rxo->cq;
1905 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1906 sizeof(struct be_eth_rx_compl));
1907 if (rc)
10ef9ab4 1908 return rc;
3abcdeda 1909
1910 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1911 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 1912 if (rc)
10ef9ab4 1913 return rc;
3abcdeda 1914 }
6b7c5b94 1915
1916 if (adapter->num_rx_qs != MAX_RX_QS)
1917 dev_info(&adapter->pdev->dev,
1918 "Created only %d receive queues", adapter->num_rx_qs);
6b7c5b94 1919
10ef9ab4 1920 return 0;
1921}
1922
1923static irqreturn_t be_intx(int irq, void *dev)
1924{
1925 struct be_adapter *adapter = dev;
10ef9ab4 1926 int num_evts;
6b7c5b94 1927
1928 /* With INTx only one EQ is used */
1929 num_evts = event_handle(&adapter->eq_obj[0]);
1930 if (num_evts)
1931 return IRQ_HANDLED;
1932 else
1933 return IRQ_NONE;
1934}
1935
10ef9ab4 1936static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 1937{
10ef9ab4 1938 struct be_eq_obj *eqo = dev;
6b7c5b94 1939
10ef9ab4 1940 event_handle(eqo);
1941 return IRQ_HANDLED;
1942}
1943
2e588f84 1944static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1945{
2e588f84 1946 return (rxcp->tcpf && !rxcp->err) ? true : false;
1947}
1948
1949static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1950 int budget)
6b7c5b94 1951{
1952 struct be_adapter *adapter = rxo->adapter;
1953 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1954 struct be_rx_compl_info *rxcp;
1955 u32 work_done;
1956
1957 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1958 rxcp = be_rx_compl_get(rxo);
1959 if (!rxcp)
1960 break;
1961
1962 /* Is it a flush compl that has no data */
1963 if (unlikely(rxcp->num_rcvd == 0))
1964 goto loop_continue;
1965
1966 /* Discard compl with partial DMA Lancer B0 */
1967 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 1968 be_rx_compl_discard(rxo, rxcp);
1969 goto loop_continue;
1970 }
1971
1972 /* On BE drop pkts that arrive due to imperfect filtering in
1973 * promiscuous mode on some SKUs
1974 */
1975 if (unlikely(rxcp->port != adapter->port_num &&
1976 !lancer_chip(adapter))) {
10ef9ab4 1977 be_rx_compl_discard(rxo, rxcp);
12004ae9 1978 goto loop_continue;
64642811 1979 }
009dd872 1980
12004ae9 1981 if (do_gro(rxcp))
10ef9ab4 1982 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 1983 else
10ef9ab4 1984 be_rx_compl_process(rxo, rxcp);
12004ae9 1985loop_continue:
2e588f84 1986 be_rx_stats_update(rxo, rxcp);
1987 }
1988
1989 if (work_done) {
1990 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 1991
1992 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1993 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 1994 }
10ef9ab4 1995
1996 return work_done;
1997}
1998
1999static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2000 int budget, int idx)
6b7c5b94 2001{
6b7c5b94 2002 struct be_eth_tx_compl *txcp;
10ef9ab4 2003 int num_wrbs = 0, work_done;
3c8def97 2004
2005 for (work_done = 0; work_done < budget; work_done++) {
2006 txcp = be_tx_compl_get(&txo->cq);
2007 if (!txcp)
2008 break;
2009 num_wrbs += be_tx_compl_process(adapter, txo,
2010 AMAP_GET_BITS(struct amap_eth_tx_compl,
2011 wrb_index, txcp));
10ef9ab4 2012 }
6b7c5b94 2013
2014 if (work_done) {
2015 be_cq_notify(adapter, txo->cq.id, true, work_done);
2016 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2017
2018 /* As Tx wrbs have been freed up, wake up netdev queue
2019 * if it was stopped due to lack of tx wrbs. */
2020 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2021 atomic_read(&txo->q.used) < txo->q.len / 2) {
2022 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2023 }
2024
2025 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2026 tx_stats(txo)->tx_compl += work_done;
2027 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2028 }
2029 return (work_done < budget); /* Done */
2030}
6b7c5b94 2031
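/* NAPI poll handler. TX gets its own budget (eqo->tx_budget); if any TXQ
 * fails to complete within that budget, max_work is forced up to 'budget'
 * so that napi_complete() is skipped and polling continues.
 */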
2032int be_poll(struct napi_struct *napi, int budget)
2033{
2034 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2035 struct be_adapter *adapter = eqo->adapter;
2036 int max_work = 0, work, i;
2037 bool tx_done;
f31e50a8 2038
2039 /* Process all TXQs serviced by this EQ */
2040 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2041 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2042 eqo->tx_budget, i);
2043 if (!tx_done)
2044 max_work = budget;
2045 }
2046
2047 /* This loop will iterate twice for EQ0, in which
2048 * completions of the last RXQ (the default one) are also processed.
2049 * For other EQs the loop iterates only once.
2050 */
2051 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2052 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2053 max_work = max(work, max_work);
2054 }
6b7c5b94 2055
2056 if (is_mcc_eqo(eqo))
2057 be_process_mcc(adapter);
93c86700 2058
2059 if (max_work < budget) {
2060 napi_complete(napi);
2061 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2062 } else {
2063 /* As we'll continue in polling mode, count and clear events */
2064 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
93c86700 2065 }
10ef9ab4 2066 return max_work;
2067}
2068
d053de91 2069void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 2070{
2071 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2072 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2073 u32 i;
2074
2075 if (adapter->eeh_err || adapter->ue_detected)
2076 return;
2077
2078 if (lancer_chip(adapter)) {
2079 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2080 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2081 sliport_err1 = ioread32(adapter->db +
2082 SLIPORT_ERROR1_OFFSET);
2083 sliport_err2 = ioread32(adapter->db +
2084 SLIPORT_ERROR2_OFFSET);
2085 }
2086 } else {
2087 pci_read_config_dword(adapter->pdev,
2088 PCICFG_UE_STATUS_LOW, &ue_lo);
2089 pci_read_config_dword(adapter->pdev,
2090 PCICFG_UE_STATUS_HIGH, &ue_hi);
2091 pci_read_config_dword(adapter->pdev,
2092 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2093 pci_read_config_dword(adapter->pdev,
2094 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2095
2096 ue_lo = (ue_lo & (~ue_lo_mask));
2097 ue_hi = (ue_hi & (~ue_hi_mask));
2098 }
7c185276 2099
2100 if (ue_lo || ue_hi ||
2101 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2102 adapter->ue_detected = true;
7acc2087 2103 adapter->eeh_err = true;
2104 dev_err(&adapter->pdev->dev,
2105 "Unrecoverable error in the card\n");
2106 }
2107
2108 if (ue_lo) {
2109 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2110 if (ue_lo & 1)
2111 dev_err(&adapter->pdev->dev,
2112 "UE: %s bit set\n", ue_status_low_desc[i]);
2113 }
2114 }
2115 if (ue_hi) {
2116 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2117 if (ue_hi & 1)
2118 dev_err(&adapter->pdev->dev,
2119 "UE: %s bit set\n", ue_status_hi_desc[i]);
2120 }
2121 }
2122
2123 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2124 dev_err(&adapter->pdev->dev,
2125 "sliport status 0x%x\n", sliport_status);
2126 dev_err(&adapter->pdev->dev,
2127 "sliport error1 0x%x\n", sliport_err1);
2128 dev_err(&adapter->pdev->dev,
2129 "sliport error2 0x%x\n", sliport_err2);
2130 }
2131}
2132
2133static void be_msix_disable(struct be_adapter *adapter)
2134{
ac6a0c4a 2135 if (msix_enabled(adapter)) {
8d56ff11 2136 pci_disable_msix(adapter->pdev);
ac6a0c4a 2137 adapter->num_msix_vec = 0;
2138 }
2139}
2140
2141static uint be_num_rss_want(struct be_adapter *adapter)
2142{
2143 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
39f1d94d 2144 !sriov_want(adapter) && be_physfn(adapter) &&
2145 !be_is_mc(adapter))
2146 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2147 else
2148 return 0;
2149}
2150
2151static void be_msix_enable(struct be_adapter *adapter)
2152{
10ef9ab4 2153#define BE_MIN_MSIX_VECTORS 1
045508a8 2154 int i, status, num_vec, num_roce_vec = 0;
6b7c5b94 2155
2156 /* If RSS queues are not used, need a vec for default RX Q */
2157 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2158 if (be_roce_supported(adapter)) {
2159 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2160 (num_online_cpus() + 1));
2161 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2162 num_vec += num_roce_vec;
2163 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2164 }
10ef9ab4 2165 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2166
ac6a0c4a 2167 for (i = 0; i < num_vec; i++)
2168 adapter->msix_entries[i].entry = i;
2169
ac6a0c4a 2170 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2171 if (status == 0) {
2172 goto done;
2173 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2174 num_vec = status;
3abcdeda 2175 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2176 num_vec) == 0)
3abcdeda 2177 goto done;
2178 }
2179 return;
2180done:
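 /* Split the granted vectors between NIC and RoCE: e.g. if 8 vectors
 * were granted and num_roce_vec is 4, the NIC keeps 4 and RoCE gets
 * the remaining 4.
 */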
2181 if (be_roce_supported(adapter)) {
2182 if (num_vec > num_roce_vec) {
2183 adapter->num_msix_vec = num_vec - num_roce_vec;
2184 adapter->num_msix_roce_vec =
2185 num_vec - adapter->num_msix_vec;
2186 } else {
2187 adapter->num_msix_vec = num_vec;
2188 adapter->num_msix_roce_vec = 0;
2189 }
2190 } else
2191 adapter->num_msix_vec = num_vec;
ac6a0c4a 2192 return;
2193}
2194
fe6d2a38 2195static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2196 struct be_eq_obj *eqo)
b628bde2 2197{
10ef9ab4 2198 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2199}
6b7c5b94 2200
2201static int be_msix_register(struct be_adapter *adapter)
2202{
2203 struct net_device *netdev = adapter->netdev;
2204 struct be_eq_obj *eqo;
2205 int status, i, vec;
6b7c5b94 2206
2207 for_all_evt_queues(adapter, eqo, i) {
2208 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2209 vec = be_msix_vec_get(adapter, eqo);
2210 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2211 if (status)
2212 goto err_msix;
2213 }
b628bde2 2214
6b7c5b94 2215 return 0;
3abcdeda 2216err_msix:
2217 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2218 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2219 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2220 status);
ac6a0c4a 2221 be_msix_disable(adapter);
2222 return status;
2223}
2224
2225static int be_irq_register(struct be_adapter *adapter)
2226{
2227 struct net_device *netdev = adapter->netdev;
2228 int status;
2229
ac6a0c4a 2230 if (msix_enabled(adapter)) {
2231 status = be_msix_register(adapter);
2232 if (status == 0)
2233 goto done;
2234 /* INTx is not supported for VF */
2235 if (!be_physfn(adapter))
2236 return status;
2237 }
2238
2239 /* INTx */
2240 netdev->irq = adapter->pdev->irq;
2241 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2242 adapter);
2243 if (status) {
2244 dev_err(&adapter->pdev->dev,
2245 "INTx request IRQ failed - err %d\n", status);
2246 return status;
2247 }
2248done:
2249 adapter->isr_registered = true;
2250 return 0;
2251}
2252
2253static void be_irq_unregister(struct be_adapter *adapter)
2254{
2255 struct net_device *netdev = adapter->netdev;
10ef9ab4 2256 struct be_eq_obj *eqo;
3abcdeda 2257 int i;
2258
2259 if (!adapter->isr_registered)
2260 return;
2261
2262 /* INTx */
ac6a0c4a 2263 if (!msix_enabled(adapter)) {
2264 free_irq(netdev->irq, adapter);
2265 goto done;
2266 }
2267
2268 /* MSIx */
2269 for_all_evt_queues(adapter, eqo, i)
2270 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2271
2272done:
2273 adapter->isr_registered = false;
2274}
2275
10ef9ab4 2276static void be_rx_qs_destroy(struct be_adapter *adapter)
2277{
2278 struct be_queue_info *q;
2279 struct be_rx_obj *rxo;
2280 int i;
2281
2282 for_all_rx_queues(adapter, rxo, i) {
2283 q = &rxo->q;
2284 if (q->created) {
2285 be_cmd_rxq_destroy(adapter, q);
2286 /* After the rxq is invalidated, wait for a grace time
2287 * of 1ms for all dma to end and the flush compl to
2288 * arrive
2289 */
2290 mdelay(1);
10ef9ab4 2291 be_rx_cq_clean(rxo);
482c9e79 2292 }
10ef9ab4 2293 be_queue_free(adapter, q);
2294 }
2295}
2296
2297static int be_close(struct net_device *netdev)
2298{
2299 struct be_adapter *adapter = netdev_priv(netdev);
2300 struct be_eq_obj *eqo;
2301 int i;
889cd4b2 2302
2303 be_roce_dev_close(adapter);
2304
2305 be_async_mcc_disable(adapter);
2306
2307 if (!lancer_chip(adapter))
2308 be_intr_set(adapter, false);
889cd4b2 2309
2310 for_all_evt_queues(adapter, eqo, i) {
2311 napi_disable(&eqo->napi);
2312 if (msix_enabled(adapter))
2313 synchronize_irq(be_msix_vec_get(adapter, eqo));
2314 else
2315 synchronize_irq(netdev->irq);
2316 be_eq_clean(eqo);
2317 }
2318
2319 be_irq_unregister(adapter);
2320
2321 /* Wait for all pending tx completions to arrive so that
2322 * all tx skbs are freed.
2323 */
0ae57bb3 2324 be_tx_compl_clean(adapter);
889cd4b2 2325
10ef9ab4 2326 be_rx_qs_destroy(adapter);
2327 return 0;
2328}
2329
10ef9ab4 2330static int be_rx_qs_create(struct be_adapter *adapter)
2331{
2332 struct be_rx_obj *rxo;
2333 int rc, i, j;
2334 u8 rsstable[128];
2335
2336 for_all_rx_queues(adapter, rxo, i) {
2337 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2338 sizeof(struct be_eth_rx_d));
2339 if (rc)
2340 return rc;
2341 }
2342
2343 /* The FW would like the default RXQ to be created first */
2344 rxo = default_rxo(adapter);
2345 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2346 adapter->if_handle, false, &rxo->rss_id);
2347 if (rc)
2348 return rc;
2349
2350 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2351 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2352 rx_frag_size, adapter->if_handle,
2353 true, &rxo->rss_id);
2354 if (rc)
2355 return rc;
2356 }
2357
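 /* The 128-entry RSS indirection table below is filled round-robin:
 * with 4 RSS rings, each rss_id lands in every 4th slot and repeats
 * 32 times across the table.
 */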
2358 if (be_multi_rxq(adapter)) {
2359 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2360 for_all_rss_queues(adapter, rxo, i) {
2361 if ((j + i) >= 128)
2362 break;
2363 rsstable[j + i] = rxo->rss_id;
2364 }
2365 }
2366 rc = be_cmd_rss_config(adapter, rsstable, 128);
2367 if (rc)
2368 return rc;
2369 }
2370
2371 /* First time posting */
10ef9ab4 2372 for_all_rx_queues(adapter, rxo, i)
482c9e79 2373 be_post_rx_frags(rxo, GFP_KERNEL);
2374 return 0;
2375}
2376
2377static int be_open(struct net_device *netdev)
2378{
2379 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2380 struct be_eq_obj *eqo;
3abcdeda 2381 struct be_rx_obj *rxo;
10ef9ab4 2382 struct be_tx_obj *txo;
b236916a 2383 u8 link_status;
3abcdeda 2384 int status, i;
5fb379ee 2385
10ef9ab4 2386 status = be_rx_qs_create(adapter);
2387 if (status)
2388 goto err;
2389
2390 be_irq_register(adapter);
2391
2392 if (!lancer_chip(adapter))
2393 be_intr_set(adapter, true);
5fb379ee 2394
10ef9ab4 2395 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2396 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2397
2398 for_all_tx_queues(adapter, txo, i)
2399 be_cq_notify(adapter, txo->cq.id, true, 0);
2400
2401 be_async_mcc_enable(adapter);
2402
2403 for_all_evt_queues(adapter, eqo, i) {
2404 napi_enable(&eqo->napi);
2405 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2406 }
2407
2408 status = be_cmd_link_status_query(adapter, NULL, NULL,
2409 &link_status, 0);
2410 if (!status)
2411 be_link_status_update(adapter, link_status);
2412
045508a8 2413 be_roce_dev_open(adapter);
2414 return 0;
2415err:
2416 be_close(adapter->netdev);
2417 return -EIO;
2418}
2419
2420static int be_setup_wol(struct be_adapter *adapter, bool enable)
2421{
2422 struct be_dma_mem cmd;
2423 int status = 0;
2424 u8 mac[ETH_ALEN];
2425
2426 memset(mac, 0, ETH_ALEN);
2427
2428 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2429 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2430 GFP_KERNEL);
2431 if (cmd.va == NULL)
2432 return -1;
2433 memset(cmd.va, 0, cmd.size);
2434
2435 if (enable) {
2436 status = pci_write_config_dword(adapter->pdev,
2437 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2438 if (status) {
2439 dev_err(&adapter->pdev->dev,
2381a55c 2440 "Could not enable Wake-on-lan\n");
2441 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2442 cmd.dma);
2443 return status;
2444 }
2445 status = be_cmd_enable_magic_wol(adapter,
2446 adapter->netdev->dev_addr, &cmd);
2447 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2448 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2449 } else {
2450 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2451 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2452 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2453 }
2454
2b7bcebf 2455 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2456 return status;
2457}
2458
2459/*
2460 * Generate a seed MAC address from the PF MAC Address using jhash.
2461 * MAC addresses for VFs are assigned incrementally starting from the seed.
2462 * These addresses are programmed in the ASIC by the PF and the VF driver
2463 * queries for the MAC address during its probe.
2464 */
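/* e.g. a seed of 02:00:11:22:33:40 gives VF0 02:00:11:22:33:40 and VF1
 * 02:00:11:22:33:41; only mac[5] is incremented, so the sequence wraps
 * within the last octet rather than carrying into mac[4].
 */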
2465static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2466{
f9449ab7 2467 u32 vf;
3abcdeda 2468 int status = 0;
6d87f5c3 2469 u8 mac[ETH_ALEN];
11ac75ed 2470 struct be_vf_cfg *vf_cfg;
2471
2472 be_vf_eth_addr_generate(adapter, mac);
2473
11ac75ed 2474 for_all_vfs(adapter, vf_cfg, vf) {
2475 if (lancer_chip(adapter)) {
2476 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2477 } else {
2478 status = be_cmd_pmac_add(adapter, mac,
2479 vf_cfg->if_handle,
2480 &vf_cfg->pmac_id, vf + 1);
2481 }
2482
2483 if (status)
2484 dev_err(&adapter->pdev->dev,
590c391d 2485 "MAC address assignment failed for VF %d\n", vf);
6d87f5c3 2486 else
11ac75ed 2487 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2488
2489 mac[5] += 1;
2490 }
2491 return status;
2492}
2493
f9449ab7 2494static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2495{
11ac75ed 2496 struct be_vf_cfg *vf_cfg;
2497 u32 vf;
2498
2499 if (be_find_vfs(adapter, ASSIGNED)) {
2500 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2501 goto done;
2502 }
2503
11ac75ed 2504 for_all_vfs(adapter, vf_cfg, vf) {
2505 if (lancer_chip(adapter))
2506 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2507 else
2508 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2509 vf_cfg->pmac_id, vf + 1);
f9449ab7 2510
2511 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2512 }
2513 pci_disable_sriov(adapter->pdev);
2514done:
2515 kfree(adapter->vf_cfg);
2516 adapter->num_vfs = 0;
2517}
2518
2519static int be_clear(struct be_adapter *adapter)
2520{
2521 int i = 1;
2522
2523 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2524 cancel_delayed_work_sync(&adapter->work);
2525 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2526 }
2527
11ac75ed 2528 if (sriov_enabled(adapter))
2529 be_vf_clear(adapter);
2530
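 /* pmac_id[0] holds the primary MAC programmed at if_create time, so the
 * additional unicast MACs counted in uc_macs are deleted from index 1 up.
 */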
2531 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2532 be_cmd_pmac_del(adapter, adapter->if_handle,
2533 adapter->pmac_id[i], 0);
2534
f9449ab7 2535 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2536
2537 be_mcc_queues_destroy(adapter);
10ef9ab4 2538 be_rx_cqs_destroy(adapter);
a54769f5 2539 be_tx_queues_destroy(adapter);
10ef9ab4 2540 be_evt_queues_destroy(adapter);
2541
2542 /* tell fw we're done with firing cmds */
2543 be_cmd_fw_clean(adapter);
2544
2545 be_msix_disable(adapter);
39f1d94d 2546 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2547 return 0;
2548}
2549
39f1d94d 2550static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2551{
11ac75ed 2552 struct be_vf_cfg *vf_cfg;
2553 int vf;
2554
2555 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2556 GFP_KERNEL);
2557 if (!adapter->vf_cfg)
2558 return -ENOMEM;
2559
2560 for_all_vfs(adapter, vf_cfg, vf) {
2561 vf_cfg->if_handle = -1;
2562 vf_cfg->pmac_id = -1;
30128031 2563 }
39f1d94d 2564 return 0;
2565}
2566
2567static int be_vf_setup(struct be_adapter *adapter)
2568{
11ac75ed 2569 struct be_vf_cfg *vf_cfg;
39f1d94d 2570 struct device *dev = &adapter->pdev->dev;
f9449ab7 2571 u32 cap_flags, en_flags, vf;
f1f3ee1b 2572 u16 def_vlan, lnk_speed;
2573 int status, enabled_vfs;
2574
2575 enabled_vfs = be_find_vfs(adapter, ENABLED);
2576 if (enabled_vfs) {
2577 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2578 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2579 return 0;
2580 }
f9449ab7 2581
2582 if (num_vfs > adapter->dev_num_vfs) {
2583 dev_warn(dev, "Device supports %d VFs and not %d\n",
2584 adapter->dev_num_vfs, num_vfs);
2585 num_vfs = adapter->dev_num_vfs;
2586 }
2587
2588 status = pci_enable_sriov(adapter->pdev, num_vfs);
2589 if (!status) {
2590 adapter->num_vfs = num_vfs;
2591 } else {
2592 /* Platform doesn't support SRIOV though device supports it */
2593 dev_warn(dev, "SRIOV enable failed\n");
2594 return 0;
2595 }
2596
2597 status = be_vf_setup_init(adapter);
2598 if (status)
2599 goto err;
30128031 2600
2601 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2602 BE_IF_FLAGS_MULTICAST;
11ac75ed 2603 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2604 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
11ac75ed 2605 &vf_cfg->if_handle, NULL, vf + 1);
2606 if (status)
2607 goto err;
2608 }
2609
2610 if (!enabled_vfs) {
2611 status = be_vf_eth_addr_config(adapter);
2612 if (status)
2613 goto err;
2614 }
f9449ab7 2615
11ac75ed 2616 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2617 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
b236916a 2618 NULL, vf + 1);
2619 if (status)
2620 goto err;
11ac75ed 2621 vf_cfg->tx_rate = lnk_speed * 10;
2622
2623 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2624 vf + 1, vf_cfg->if_handle);
2625 if (status)
2626 goto err;
2627 vf_cfg->def_vid = def_vlan;
2628 }
2629 return 0;
2630err:
2631 return status;
2632}
2633
2634static void be_setup_init(struct be_adapter *adapter)
2635{
2636 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2637 adapter->phy.link_speed = -1;
2638 adapter->if_handle = -1;
2639 adapter->be3_native = false;
2640 adapter->promiscuous = false;
2641 adapter->eq_next_idx = 0;
42f11cf2 2642 adapter->phy.forced_port_speed = -1;
2643}
2644
e5e1ee89 2645static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2646{
2647 u32 pmac_id;
e5e1ee89
PR
2648 int status;
2649 bool pmac_id_active;
2650
2651 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2652 &pmac_id, mac);
2653 if (status != 0)
2654 goto do_none;
2655
2656 if (pmac_id_active) {
2657 status = be_cmd_mac_addr_query(adapter, mac,
2658 MAC_ADDRESS_TYPE_NETWORK,
2659 false, adapter->if_handle, pmac_id);
2660
2661 if (!status)
fbc13f01 2662 adapter->pmac_id[0] = pmac_id;
2663 } else {
2664 status = be_cmd_pmac_add(adapter, mac,
fbc13f01 2665 adapter->if_handle, &adapter->pmac_id[0], 0);
e5e1ee89 2666 }
2667do_none:
2668 return status;
2669}
2670
2671/* Routine to query per function resource limits */
2672static int be_get_config(struct be_adapter *adapter)
2673{
2674 int pos;
2675 u16 dev_num_vfs;
2676
2677 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2678 if (pos) {
2679 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2680 &dev_num_vfs);
2681 adapter->dev_num_vfs = dev_num_vfs;
2682 }
2683 return 0;
2684}
2685
2686static int be_setup(struct be_adapter *adapter)
2687{
5fb379ee 2688 struct net_device *netdev = adapter->netdev;
39f1d94d 2689 struct device *dev = &adapter->pdev->dev;
f9449ab7 2690 u32 cap_flags, en_flags;
a54769f5 2691 u32 tx_fc, rx_fc;
10ef9ab4 2692 int status;
2693 u8 mac[ETH_ALEN];
2694
30128031 2695 be_setup_init(adapter);
6b7c5b94 2696
2697 be_get_config(adapter);
2698
f9449ab7 2699 be_cmd_req_native_mode(adapter);
73d540f2 2700
2701 be_msix_enable(adapter);
2702
2703 status = be_evt_queues_create(adapter);
2704 if (status)
a54769f5 2705 goto err;
6b7c5b94 2706
2707 status = be_tx_cqs_create(adapter);
2708 if (status)
2709 goto err;
2710
2711 status = be_rx_cqs_create(adapter);
2712 if (status)
a54769f5 2713 goto err;
6b7c5b94 2714
f9449ab7 2715 status = be_mcc_queues_create(adapter);
10ef9ab4 2716 if (status)
a54769f5 2717 goto err;
6b7c5b94 2718
2719 memset(mac, 0, ETH_ALEN);
2720 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2721 true /*permanent */, 0, 0);
2722 if (status)
2723 return status;
2724 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2725 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2726
2727 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2728 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2729 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2730 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2731
2732 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2733 cap_flags |= BE_IF_FLAGS_RSS;
2734 en_flags |= BE_IF_FLAGS_RSS;
2735 }
2736 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2737 netdev->dev_addr, &adapter->if_handle,
fbc13f01 2738 &adapter->pmac_id[0], 0);
5fb379ee 2739 if (status != 0)
a54769f5 2740 goto err;
6b7c5b94 2741
2742 /* The VF's permanent mac queried from card is incorrect.
2743 * For BEx: Query the MAC configured by the PF using if_handle
2744 * For Lancer: Get and use mac_list to obtain mac address.
2745 */
2746 if (!be_physfn(adapter)) {
2747 if (lancer_chip(adapter))
e5e1ee89 2748 status = be_add_mac_from_list(adapter, mac);
2749 else
2750 status = be_cmd_mac_addr_query(adapter, mac,
2751 MAC_ADDRESS_TYPE_NETWORK, false,
2752 adapter->if_handle, 0);
2753 if (!status) {
2754 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2755 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2756 }
2757 }
0dffc83e 2758
2759 status = be_tx_qs_create(adapter);
2760 if (status)
2761 goto err;
2762
04b71175 2763 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2764
ddc3f5cb 2765 be_vid_config(adapter, false, 0);
7ab8b0b4 2766
a54769f5 2767 be_set_rx_mode(adapter->netdev);
5fb379ee 2768
ddc3f5cb 2769 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 2770
2771 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2772 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 2773 adapter->rx_fc);
2dc1deb6 2774
a54769f5 2775 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2776
2777 if (be_physfn(adapter) && num_vfs) {
2778 if (adapter->dev_num_vfs)
2779 be_vf_setup(adapter);
2780 else
2781 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
2782 }
2783
2784 be_cmd_get_phy_info(adapter);
2785 if (be_pause_supported(adapter))
2786 adapter->phy.fc_autoneg = 1;
2787
2788 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2789 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2790
39f1d94d 2791 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
f9449ab7 2792 return 0;
2793err:
2794 be_clear(adapter);
2795 return status;
2796}
6b7c5b94 2797
2798#ifdef CONFIG_NET_POLL_CONTROLLER
2799static void be_netpoll(struct net_device *netdev)
2800{
2801 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2802 struct be_eq_obj *eqo;
2803 int i;
2804
2805 for_all_evt_queues(adapter, eqo, i)
2806 event_handle(eqo);
2807
2808 return;
2809}
2810#endif
2811
84517482 2812#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2813char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2814
fa9a6fed 2815static bool be_flash_redboot(struct be_adapter *adapter,
2816 const u8 *p, u32 img_start, int image_size,
2817 int hdr_size)
fa9a6fed
SB
2818{
2819 u32 crc_offset;
2820 u8 flashed_crc[4];
2821 int status;
3f0d4560
AK
2822
2823 crc_offset = hdr_size + img_start + image_size - 4;
2824
fa9a6fed 2825 p += crc_offset;
2826
2827 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2828 (image_size - 4));
fa9a6fed
SB
2829 if (status) {
2830 dev_err(&adapter->pdev->dev,
2831 "could not get crc from flash, not flashing redboot\n");
2832 return false;
2833 }
2834
2835 /* update redboot only if crc does not match */
2836 if (!memcmp(flashed_crc, p, 4))
2837 return false;
2838 else
2839 return true;
2840}
2841
2842static bool phy_flashing_required(struct be_adapter *adapter)
2843{
2844 return (adapter->phy.phy_type == TN_8022 &&
2845 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2846}
2847
2848static bool is_comp_in_ufi(struct be_adapter *adapter,
2849 struct flash_section_info *fsec, int type)
2850{
2851 int i = 0, img_type = 0;
2852 struct flash_section_info_g2 *fsec_g2 = NULL;
2853
2854 if (adapter->generation != BE_GEN3)
2855 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2856
2857 for (i = 0; i < MAX_FLASH_COMP; i++) {
2858 if (fsec_g2)
2859 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2860 else
2861 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2862
2863 if (img_type == type)
2864 return true;
2865 }
2866 return false;
2867
2868}
2869
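/* Walk the UFI image in 32-byte steps looking for the flash-section
 * cookie; returns NULL if the signature is not found before the end of
 * the image.
 */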
2870struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2871 int header_size,
2872 const struct firmware *fw)
2873{
2874 struct flash_section_info *fsec = NULL;
2875 const u8 *p = fw->data;
2876
2877 p += header_size;
2878 while (p < (fw->data + fw->size)) {
2879 fsec = (struct flash_section_info *)p;
2880 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2881 return fsec;
2882 p += 32;
2883 }
2884 return NULL;
2885}
2886
3f0d4560 2887static int be_flash_data(struct be_adapter *adapter,
2888 const struct firmware *fw,
2889 struct be_dma_mem *flash_cmd,
2890 int num_of_images)
3f0d4560 2891
84517482 2892{
3f0d4560 2893 int status = 0, i, filehdr_size = 0;
c165541e 2894 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3f0d4560 2895 u32 total_bytes = 0, flash_op;
2896 int num_bytes;
2897 const u8 *p = fw->data;
2898 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2899 const struct flash_comp *pflashcomp;
2900 int num_comp, hdr_size;
2901 struct flash_section_info *fsec = NULL;
2902
2903 struct flash_comp gen3_flash_types[] = {
2904 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2905 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2906 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2907 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2908 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2909 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2910 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2911 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2912 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2913 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2914 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2915 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2916 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2917 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2918 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2919 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2920 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2921 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2922 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2923 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 2924 };
2925
2926 struct flash_comp gen2_flash_types[] = {
2927 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2928 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2929 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2930 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2931 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2932 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2933 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2934 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2935 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2936 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2937 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2938 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2939 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2940 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2941 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2942 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2943 };
2944
2945 if (adapter->generation == BE_GEN3) {
2946 pflashcomp = gen3_flash_types;
2947 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2948 num_comp = ARRAY_SIZE(gen3_flash_types);
2949 } else {
2950 pflashcomp = gen2_flash_types;
2951 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2952 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2953 }
2954 /* Get flash section info */
2955 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2956 if (!fsec) {
2957 dev_err(&adapter->pdev->dev,
2958 "Invalid Cookie. UFI corrupted ?\n");
2959 return -1;
2960 }
9fe96934 2961 for (i = 0; i < num_comp; i++) {
c165541e 2962 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 2963 continue;
2964
2965 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2966 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2967 continue;
2968
2969 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
2970 if (!phy_flashing_required(adapter))
2971 continue;
2972 }
2973
2974 hdr_size = filehdr_size +
2975 (num_of_images * sizeof(struct image_hdr));
2976
2977 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2978 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2979 pflashcomp[i].size, hdr_size)))
3f0d4560 2980 continue;
2981
2982 /* Flash the component */
3f0d4560 2983 p = fw->data;
c165541e 2984 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
2985 if (p + pflashcomp[i].size > fw->data + fw->size)
2986 return -1;
2987 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2988 while (total_bytes) {
2989 if (total_bytes > 32*1024)
2990 num_bytes = 32*1024;
2991 else
2992 num_bytes = total_bytes;
2993 total_bytes -= num_bytes;
306f1348 2994 if (!total_bytes) {
c165541e 2995 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2996 flash_op = FLASHROM_OPER_PHY_FLASH;
2997 else
2998 flash_op = FLASHROM_OPER_FLASH;
2999 } else {
c165541e 3000 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3001 flash_op = FLASHROM_OPER_PHY_SAVE;
3002 else
3003 flash_op = FLASHROM_OPER_SAVE;
3004 }
3f0d4560
AK
3005 memcpy(req->params.data_buf, p, num_bytes);
3006 p += num_bytes;
3007 status = be_cmd_write_flashrom(adapter, flash_cmd,
3008 pflashcomp[i].optype, flash_op, num_bytes);
3009 if (status) {
306f1348
SP
3010 if ((status == ILLEGAL_IOCTL_REQ) &&
3011 (pflashcomp[i].optype ==
c165541e 3012 OPTYPE_PHY_FW))
306f1348 3013 break;
3f0d4560
AK
3014 dev_err(&adapter->pdev->dev,
3015 "cmd to write to flash rom failed.\n");
3016 return -1;
3017 }
84517482 3018 }
84517482 3019 }
3020 return 0;
3021}
3022
3f0d4560
AK
3023static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3024{
3025 if (fhdr == NULL)
3026 return 0;
3027 if (fhdr->build[0] == '3')
3028 return BE_GEN3;
3029 else if (fhdr->build[0] == '2')
3030 return BE_GEN2;
3031 else
3032 return 0;
3033}
3034
3035static int lancer_fw_download(struct be_adapter *adapter,
3036 const struct firmware *fw)
84517482 3037{
3038#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3039#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3040 struct be_dma_mem flash_cmd;
3041 const u8 *data_ptr = NULL;
3042 u8 *dest_image_ptr = NULL;
3043 size_t image_size = 0;
3044 u32 chunk_size = 0;
3045 u32 data_written = 0;
3046 u32 offset = 0;
3047 int status = 0;
3048 u8 add_status = 0;
84517482 3049
485bf569 3050 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3051 dev_err(&adapter->pdev->dev,
3052 "FW Image not properly aligned. "
3053 "Length must be 4 byte aligned.\n");
3054 status = -EINVAL;
3055 goto lancer_fw_exit;
3056 }
3057
3058 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3059 + LANCER_FW_DOWNLOAD_CHUNK;
3060 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3061 &flash_cmd.dma, GFP_KERNEL);
3062 if (!flash_cmd.va) {
3063 status = -ENOMEM;
3064 dev_err(&adapter->pdev->dev,
3065 "Memory allocation failure while flashing\n");
3066 goto lancer_fw_exit;
3067 }
84517482 3068
3069 dest_image_ptr = flash_cmd.va +
3070 sizeof(struct lancer_cmd_req_write_object);
3071 image_size = fw->size;
3072 data_ptr = fw->data;
3073
3074 while (image_size) {
3075 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3076
3077 /* Copy the image chunk content. */
3078 memcpy(dest_image_ptr, data_ptr, chunk_size);
3079
3080 status = lancer_cmd_write_object(adapter, &flash_cmd,
3081 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3082 &data_written, &add_status);
3083
3084 if (status)
3085 break;
3086
3087 offset += data_written;
3088 data_ptr += data_written;
3089 image_size -= data_written;
3090 }
3091
3092 if (!status) {
3093 /* Commit the FW written */
3094 status = lancer_cmd_write_object(adapter, &flash_cmd,
3095 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3096 &data_written, &add_status);
3097 }
3098
3099 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3100 flash_cmd.dma);
3101 if (status) {
3102 dev_err(&adapter->pdev->dev,
3103 "Firmware load error. "
3104 "Status code: 0x%x Additional Status: 0x%x\n",
3105 status, add_status);
3106 goto lancer_fw_exit;
3107 }
3108
3109 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3110lancer_fw_exit:
3111 return status;
3112}
3113
3114static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3115{
3116 struct flash_file_hdr_g2 *fhdr;
3117 struct flash_file_hdr_g3 *fhdr3;
3118 struct image_hdr *img_hdr_ptr = NULL;
3119 struct be_dma_mem flash_cmd;
3120 const u8 *p;
3121 int status = 0, i = 0, num_imgs = 0;
3122
3123 p = fw->data;
3f0d4560 3124 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 3125
84517482 3126 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3127 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3128 &flash_cmd.dma, GFP_KERNEL);
3129 if (!flash_cmd.va) {
3130 status = -ENOMEM;
3131 dev_err(&adapter->pdev->dev,
3132 "Memory allocation failure while flashing\n");
485bf569 3133 goto be_fw_exit;
3134 }
3135
3136 if ((adapter->generation == BE_GEN3) &&
3137 (get_ufigen_type(fhdr) == BE_GEN3)) {
3138 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3139 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3140 for (i = 0; i < num_imgs; i++) {
3141 img_hdr_ptr = (struct image_hdr *) (fw->data +
3142 (sizeof(struct flash_file_hdr_g3) +
3143 i * sizeof(struct image_hdr)));
3144 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3145 status = be_flash_data(adapter, fw, &flash_cmd,
3146 num_imgs);
3147 }
3148 } else if ((adapter->generation == BE_GEN2) &&
3149 (get_ufigen_type(fhdr) == BE_GEN2)) {
3150 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3151 } else {
3152 dev_err(&adapter->pdev->dev,
3153 "UFI and Interface are not compatible for flashing\n");
3154 status = -1;
3155 }
3156
3157 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3158 flash_cmd.dma);
3159 if (status) {
3160 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3161 goto be_fw_exit;
3162 }
3163
af901ca1 3164 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3165
3166be_fw_exit:
3167 return status;
3168}
3169
3170int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3171{
3172 const struct firmware *fw;
3173 int status;
3174
3175 if (!netif_running(adapter->netdev)) {
3176 dev_err(&adapter->pdev->dev,
3177 "Firmware load not allowed (interface is down)\n");
3178 return -1;
3179 }
3180
3181 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3182 if (status)
3183 goto fw_exit;
3184
3185 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3186
3187 if (lancer_chip(adapter))
3188 status = lancer_fw_download(adapter, fw);
3189 else
3190 status = be_fw_download(adapter, fw);
3191
3192fw_exit:
3193 release_firmware(fw);
3194 return status;
3195}
3196
e5686ad8 3197static const struct net_device_ops be_netdev_ops = {
3198 .ndo_open = be_open,
3199 .ndo_stop = be_close,
3200 .ndo_start_xmit = be_xmit,
a54769f5 3201 .ndo_set_rx_mode = be_set_rx_mode,
3202 .ndo_set_mac_address = be_mac_addr_set,
3203 .ndo_change_mtu = be_change_mtu,
ab1594e9 3204 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3205 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3206 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3207 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3208 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3209 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3210 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3211 .ndo_get_vf_config = be_get_vf_config,
3212#ifdef CONFIG_NET_POLL_CONTROLLER
3213 .ndo_poll_controller = be_netpoll,
3214#endif
3215};
3216
3217static void be_netdev_init(struct net_device *netdev)
3218{
3219 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3220 struct be_eq_obj *eqo;
3abcdeda 3221 int i;
6b7c5b94 3222
6332c8d3 3223 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3224 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3225 NETIF_F_HW_VLAN_TX;
3226 if (be_multi_rxq(adapter))
3227 netdev->hw_features |= NETIF_F_RXHASH;
3228
3229 netdev->features |= netdev->hw_features |
8b8ddc68 3230 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3231
eb8a50d9 3232 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3233 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3234
3235 netdev->priv_flags |= IFF_UNICAST_FLT;
3236
3237 netdev->flags |= IFF_MULTICAST;
3238
3239 netif_set_gso_max_size(netdev, 65535);
3240
10ef9ab4 3241 netdev->netdev_ops = &be_netdev_ops;
3242
3243 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3244
3245 for_all_evt_queues(adapter, eqo, i)
3246 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3247}
3248
3249static void be_unmap_pci_bars(struct be_adapter *adapter)
3250{
3251 if (adapter->csr)
3252 iounmap(adapter->csr);
3253 if (adapter->db)
3254 iounmap(adapter->db);
3255 if (adapter->roce_db.base)
3256 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3257}
3258
3259static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3260{
3261 struct pci_dev *pdev = adapter->pdev;
3262 u8 __iomem *addr;
3263
3264 addr = pci_iomap(pdev, 2, 0);
3265 if (addr == NULL)
3266 return -ENOMEM;
3267
3268 adapter->roce_db.base = addr;
3269 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3270 adapter->roce_db.size = 8192;
3271 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3272 return 0;
3273}
3274
3275static int be_map_pci_bars(struct be_adapter *adapter)
3276{
3277 u8 __iomem *addr;
db3ea781 3278 int db_reg;
6b7c5b94 3279
fe6d2a38 3280 if (lancer_chip(adapter)) {
3281 if (be_type_2_3(adapter)) {
3282 addr = ioremap_nocache(
3283 pci_resource_start(adapter->pdev, 0),
3284 pci_resource_len(adapter->pdev, 0));
3285 if (addr == NULL)
3286 return -ENOMEM;
3287 adapter->db = addr;
3288 }
3289 if (adapter->if_type == SLI_INTF_TYPE_3) {
3290 if (lancer_roce_map_pci_bars(adapter))
3291 goto pci_map_err;
3292 }
3293 return 0;
3294 }
3295
3296 if (be_physfn(adapter)) {
3297 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3298 pci_resource_len(adapter->pdev, 2));
3299 if (addr == NULL)
3300 return -ENOMEM;
3301 adapter->csr = addr;
3302 }
6b7c5b94 3303
ba343c77 3304 if (adapter->generation == BE_GEN2) {
3305 db_reg = 4;
3306 } else {
3307 if (be_physfn(adapter))
3308 db_reg = 4;
3309 else
3310 db_reg = 0;
3311 }
3312 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3313 pci_resource_len(adapter->pdev, db_reg));
3314 if (addr == NULL)
3315 goto pci_map_err;
ba343c77 3316 adapter->db = addr;
3317 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3318 adapter->roce_db.size = 4096;
3319 adapter->roce_db.io_addr =
3320 pci_resource_start(adapter->pdev, db_reg);
3321 adapter->roce_db.total_size =
3322 pci_resource_len(adapter->pdev, db_reg);
3323 }
3324 return 0;
3325pci_map_err:
3326 be_unmap_pci_bars(adapter);
3327 return -ENOMEM;
3328}
3329
3330static void be_ctrl_cleanup(struct be_adapter *adapter)
3331{
8788fdc2 3332 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3333
3334 be_unmap_pci_bars(adapter);
3335
3336 if (mem->va)
3337 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3338 mem->dma);
e7b909a6 3339
5b8821b7 3340 mem = &adapter->rx_filter;
e7b909a6 3341 if (mem->va)
3342 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3343 mem->dma);
3344}
3345
3346static int be_ctrl_init(struct be_adapter *adapter)
3347{
3348 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3349 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3350 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3351 int status;
3352
3353 status = be_map_pci_bars(adapter);
3354 if (status)
e7b909a6 3355 goto done;
3356
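 /* The mailbox is over-allocated by 16 bytes so that both its virtual
 * and DMA addresses can be rounded up to a 16-byte boundary by the
 * PTR_ALIGN calls below.
 */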
3357 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3358 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3359 mbox_mem_alloc->size,
3360 &mbox_mem_alloc->dma,
3361 GFP_KERNEL);
6b7c5b94 3362 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3363 status = -ENOMEM;
3364 goto unmap_pci_bars;
6b7c5b94
SP
3365 }
3366 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3367 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3368 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3369 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3370
5b8821b7
SP
3371 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3372 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3373 &rx_filter->dma, GFP_KERNEL);
3374 if (rx_filter->va == NULL) {
e7b909a6
SP
3375 status = -ENOMEM;
3376 goto free_mbox;
3377 }
5b8821b7 3378 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3379
2984961c 3380 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3381 spin_lock_init(&adapter->mcc_lock);
3382 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3383
dd131e76 3384 init_completion(&adapter->flash_compl);
cf588477 3385 pci_save_state(adapter->pdev);
6b7c5b94 3386 return 0;
e7b909a6
SP
3387
3388free_mbox:
2b7bcebf
IV
3389 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3390 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3391
3392unmap_pci_bars:
3393 be_unmap_pci_bars(adapter);
3394
3395done:
3396 return status;
6b7c5b94
SP
3397}
3398
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

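/*
 * Allocate the DMA buffer used for firmware statistics commands; the
 * request size depends on the chip: v0 stats on BE2, pport stats on
 * Lancer, v1 stats otherwise.
 */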
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

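/* PCI remove callback: tear down state in the reverse order of be_probe() */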
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

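/*
 * Query the firmware's extended FAT capabilities and return the current
 * UART trace level; returns 0 if the query or the allocation fails.
 */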
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < cfgs->module[0].num_modes; j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

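/*
 * One-time firmware configuration queries done at probe time: function
 * mode and capabilities, VLAN/MAC limits, controller attributes, WoL
 * capability and the firmware log level.
 */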
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WoL capabilities,
		 * check the exclusion list to determine WoL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

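/*
 * Determine the chip generation (BE2/BE3/Lancer) from the PCI device ID
 * and, for Lancer, validate the SLI_INTF register; also record whether
 * this function is a VF.
 */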
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

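/* Poll SLIPORT_STATUS until the Lancer firmware reports ready; 30s timeout */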
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

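/*
 * Wait for the Lancer firmware to become ready; if it reports an error
 * together with "reset needed", initiate a port reset and wait for the
 * adapter to recover.
 */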
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

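/*
 * Called periodically from be_worker(): if the Lancer port reports an
 * error state, attempt a full recovery by resetting the port and
 * re-running setup.
 */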
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

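/*
 * Periodic (1s) housekeeping: error detection and recovery, firmware
 * statistics refresh, replenishing RX queues that ran out of buffers,
 * and adaptive EQ-delay updates.
 */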
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

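/* A non-zero scratchpad register value means a function reset is required */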
static bool be_reset_required(struct be_adapter *adapter)
{
	u32 reg;

	pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
	return reg;
}

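/*
 * PCI probe: enable the device, map its BARs, bring the firmware to a
 * ready state, query the initial configuration and register the netdev.
 */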
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

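/* PM suspend: optionally arm wake-on-LAN, close the interface and power down */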
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

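/* PM resume: power the device back up, re-initialize firmware state and reopen */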
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

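/* EEH (PCI error recovery) callbacks */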
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

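/* Validate the rx_frag_size module parameter and register the PCI driver */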
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);