/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

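/* Enable/disable the adapter's ability to raise host interrupts by toggling
 * the HOSTINTR bit of the MEMBAR control register, accessed through PCI
 * config space. A no-op if the requested state matches the current one or
 * if an EEH error has been detected.
 */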
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

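/* Ring the RQ/TXQ doorbells with the number of newly posted entries. The
 * wmb() guarantees that queue entries written by the CPU are visible in
 * memory before the doorbell write reaches the adapter.
 */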
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

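/* Change the station MAC. The new address is programmed with
 * be_cmd_pmac_add() before the old pmac entry is deleted, so the interface
 * never runs without a valid filter entry.
 */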
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

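/* The populate_*_stats() helpers below copy HW statistics into the common
 * adapter->drv_stats format: BE2 uses the v0 stats layout, BE3 the v1
 * layout, and Lancer reports per-physical-port (pport) stats.
 */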
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

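/* accumulate_16bit_val() folds a 16-bit HW counter snapshot into a 32-bit
 * software accumulator. Wrap detection: if the new snapshot is smaller than
 * the low half of the accumulator, the HW counter must have wrapped past
 * 65535, so an extra 65536 is added. E.g. a low half of 0xFFF0 followed by
 * a snapshot of 0x0005 accumulates as old-high-half + 0x10005.
 */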
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

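/* Map the skb's linear head and page frags for DMA and write one WRB per
 * mapped piece, reserving the first ring slot for the header WRB, which is
 * filled last once the total copied length is known. On a mapping error,
 * everything mapped so far is unwound and 0 is returned.
 */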
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

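/* ndo_start_xmit handler. The skb is always consumed: on success its WRBs
 * are posted and the TX doorbell rung; on failure (e.g. a DMA mapping error
 * in make_tx_wrbs()) it is freed, and NETDEV_TX_OK is returned either way.
 */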
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (vlan_tx_tag_present(skb) &&
	    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

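/* Sync the RX filter state with the stack: promiscuous mode wins outright;
 * too many multicast addresses fall back to all-multi; the unicast MAC list
 * is reprogrammed when it changes; and a failed multicast update also falls
 * back to all-multi rather than dropping packets.
 */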
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

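/* Adaptive interrupt coalescing: roughly once a second the RX packet rate
 * for this EQ's RX queue is recomputed and the EQ delay (eqd) derived from
 * it, clamped to [min_eqd, max_eqd]; very low rates disable coalescing
 * (eqd = 0). The FW is told only when the value actually changes.
 */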
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

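/* Reclaim one completed transmit: walk the WRBs from the queue tail up to
 * last_index, unmap each fragment (the first data WRB maps the skb's linear
 * head, hence unmap_skb_hdr) and free the skb. Returns the number of WRBs
 * consumed so the caller can credit txq->used.
 */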
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

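/* Drain pending events, notify/rearm the EQ and, if any events were found,
 * schedule the NAPI handler. Rearming only on a zero count handles spurious
 * interrupts that arrive without events.
 */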
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

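/* Discards all pending RX completions and then frees any posted RX buffers
 * for which no completion will arrive; expects the RXQ to have been
 * invalidated in hardware first.
 */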
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

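/* Reclaims all posted TX buffers on all TX queues: polls for up to 200ms
 * for outstanding tx-completions to arrive, then force-frees any wrbs for
 * which a completion will never come.
 */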
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

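/* Multiple TXQs are used only on a BE3-native PF; SR-IOV, multi-channel,
 * Lancer, VF and BE2 configurations are limited to a single TXQ.
 */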
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
			adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

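/* RX half of the NAPI poll loop: consumes up to "budget" completions from
 * this RXQ's CQ, dropping flush completions, partial-DMA completions and
 * (on BE in promiscuous mode) packets for the other port, and feeds the
 * rest into the GRO or regular receive path. The CQ is then notified with
 * the number of entries consumed and the RXQ replenished if running low.
 */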
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

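/* TX half of the NAPI poll loop: reaps up to "budget" tx-completions,
 * returns the freed wrbs to the TXQ and wakes the netdev subqueue once at
 * least half of the TXQ is free again. Returns true when the CQ was fully
 * drained within the budget.
 */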
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

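/* NAPI poll handler. An EQ may service several TX and RX queues: TXQ/RXQ i
 * is serviced by EQ (i % num_evt_qs), so each EQ walks its queues with a
 * stride of num_evt_qs (e.g. with two EQs, EQ1 services queues 1, 3, 5...).
 */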
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

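/* Checks the card for an unrecoverable error: the SLIPORT status registers
 * on Lancer, or the UE status registers (with masked bits cleared) in PCI
 * config space on BE. Every set bit is decoded via the ue_status_low_desc/
 * ue_status_hi_desc tables and logged.
 */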
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_want(adapter) && be_physfn(adapter) &&
	    !be_is_mc(adapter))
		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	else
		return 0;
}

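/* Requests one MSI-x vector per RSS ring wanted (capped by the number of
 * online CPUs) plus, when RoCE is supported, vectors for the RoCE EQs. If
 * pci_enable_msix() reports that only fewer vectors are available, the
 * request is retried with that count; the granted vectors are then split
 * between the NIC and RoCE.
 */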
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

2330
10ef9ab4 2331static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2332{
2333 struct be_rx_obj *rxo;
e9008ee9
PR
2334 int rc, i, j;
2335 u8 rsstable[128];
482c9e79
SP
2336
2337 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2338 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2339 sizeof(struct be_eth_rx_d));
2340 if (rc)
2341 return rc;
2342 }
2343
2344 /* The FW would like the default RXQ to be created first */
2345 rxo = default_rxo(adapter);
2346 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2347 adapter->if_handle, false, &rxo->rss_id);
2348 if (rc)
2349 return rc;
2350
2351 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2352 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2353 rx_frag_size, adapter->if_handle,
2354 true, &rxo->rss_id);
482c9e79
SP
2355 if (rc)
2356 return rc;
2357 }
2358
2359 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2360 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2361 for_all_rss_queues(adapter, rxo, i) {
2362 if ((j + i) >= 128)
2363 break;
2364 rsstable[j + i] = rxo->rss_id;
2365 }
2366 }
2367 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2368 if (rc)
2369 return rc;
2370 }
2371
2372 /* First time posting */
10ef9ab4 2373 for_all_rx_queues(adapter, rxo, i)
482c9e79 2374 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2375 return 0;
2376}
2377
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

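/* Arms or disarms magic-packet wake-on-LAN in the FW. Disabling passes a
 * zeroed MAC address; the PCI D3hot/D3cold wake state is updated to match.
 */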
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

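/* Enables SR-IOV (honouring the num_vfs module parameter, clamped to what
 * the device reports) and provisions each VF with an if_handle, a MAC
 * address, a tx_rate derived from the link speed, and the default vlan
 * from the hw switch config.
 */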
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

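/* Main function-level bring-up path: creates the EQs, CQs, MCC and TX
 * queues, the interface handle and MAC address, applies vlan, rx-mode and
 * flow-control settings, optionally enables SR-IOV and finally schedules
 * the worker. Any failure is unwound via be_clear().
 */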
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, &adapter->if_handle,
				  &adapter->pmac_id[0], 0);
	if (status != 0)
		goto err;

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_add_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

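/* Flashes every component present in the UFI image. The gen2/gen3 tables
 * below map each component's flash offset and maximum size to its optype;
 * a matching component is written in 32KB chunks, all but the last issued
 * as SAVE operations and the final one as a FLASH (commit) operation.
 */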
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

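/* Lancer firmware download uses the write_object command: each 32KB chunk
 * of the image is copied into a DMA buffer and written to the "/prg"
 * object, and a final zero-length write commits the downloaded firmware.
 */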
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

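/* Maps the PCI BARs and allocates the mailbox and rx_filter DMA memory.
 * The mailbox buffer is over-allocated by 16 bytes so that the va/dma
 * addresses actually used for the mailbox can be aligned to a 16-byte
 * boundary.
 */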
6b7c5b94
SP
3343static int be_ctrl_init(struct be_adapter *adapter)
3344{
8788fdc2
SP
3345 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3346 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3347 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3348 int status;
6b7c5b94
SP
3349
3350 status = be_map_pci_bars(adapter);
3351 if (status)
e7b909a6 3352 goto done;
6b7c5b94
SP
3353
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

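/* Query the firmware's extended FAT capabilities and return the debug
 * level currently set for UART-mode tracing; 0 is returned if the DMA
 * allocation or the query fails.
 */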
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
					sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < cfgs->module[0].num_modes; j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability
		 */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

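/* Derive the ASIC generation from the PCI device ID; for the Lancer and
 * Skyhawk device IDs the SLI_INTF register must also report a valid
 * interface before its family/type fields are trusted.
 */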
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
					SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

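/* Poll the SLIPORT status register until the firmware reports ready,
 * giving up after SLIPORT_READY_TIMEOUT seconds.
 */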
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

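/* If the port is in an error state that the firmware flags as needing
 * a reset, initiate one through the SLIPORT control register and wait
 * for the error and reset-needed bits to clear.
 */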
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

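/* Periodic (1 Hz) housekeeping: check for adapter errors, refresh
 * statistics, replenish starved RX rings and update the adaptive EQ
 * delay of every event queue.
 */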
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

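/* A function reset is skipped when VFs are already enabled, presumably
 * so that a PF re-probe does not destroy VFs that are still in use.
 */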
static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

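	/* Prefer 64-bit DMA and advertise NETIF_F_HIGHDMA; fall back to a
	 * 32-bit mask if the platform cannot provide a 64-bit one.
	 */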
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

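/* Legacy PCI power-management hooks: suspend arms wake-on-LAN if
 * enabled, closes the interface and tears down adapter resources;
 * resume re-enables the device, re-syncs with the firmware and
 * rebuilds the interface.
 */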
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/* An FLR will stop BE from DMAing any data. */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

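/* PCI error-recovery (EEH) callbacks: error_detected quiesces and
 * disables the device, slot_reset re-enables it and re-POSTs the
 * firmware, and resume rebuilds and re-attaches the interface.
 */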
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);