be2net: Stats for Lancer
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

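/* Enable or disable chip-level interrupt delivery by flipping the HOSTINTR
 * bit in the PCICFG membar interrupt-control register; the register is read
 * first so a write is issued only when the state actually changes.
 */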
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

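/* Doorbell helpers: each writes a producer/consumer update to a doorbell
 * register in the db BAR. The wmb() in the RQ/TXQ variants makes the queue
 * entries visible in host memory before the device sees the doorbell.
 */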
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

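/* Copy the v0 (BE2) port/rxf/pmem counters from the stats command response
 * into adapter->drv_stats. On BE2 the jabber counters live in the rxf stats
 * block, one per port, so the right one is picked via port_num.
 */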
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events =
			rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events =
			rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

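/* Lancer returns each pport counter as two 32-bit words; make_64bit_val()
 * combines the hi/lo halves into a single 64-bit statistic.
 */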
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

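/* Fold the per-rx-queue software counters and the hw counters parsed into
 * drv_stats by be_parse_stats() into the standard net_device stats.
 */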
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

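/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec. */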
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						 - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

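/* Populate the tx header wrb: checksum/LSO/vlan offload flags, the number
 * of wrbs in this request and the total frame length. Lancer A0 needs the
 * csum bits set explicitly even for LSO packets.
 */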
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

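/* DMA-map the skb head and frags and write one fragment wrb per mapping;
 * returns the number of bytes mapped, or 0 after unwinding all mappings if
 * any dma_map call fails.
 */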
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

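/* Unpack a v1 rx completion into the hw-version-agnostic be_rx_compl_info;
 * the vlan fields are meaningful only when vtp is set in the completion.
 */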
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

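/* Unmap the wrbs of the tx request completing at last_index, free its skb
 * and return the number of wrbs (header wrb included) that were consumed.
 */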
static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

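/* Pop the next valid entry off an event queue's tail, byte-swapping it for
 * the CPU; returns NULL when no event is pending.
 */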
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

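/* Reap tx completions for up to 200ms; any requests still outstanding after
 * that are unmapped and their skbs freed so the queues can be destroyed.
 */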
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1663
6b7c5b94
SP
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

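/* Create the TX event queue, completion queue and WRB queue, unwinding in
 * reverse on any failure. The TX EQ runs with adaptive coalescing disabled
 * and a fixed event delay (cur_eqd = 96).
 */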
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

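/* Create the RX queues: num_rx_qs is capped by the MSI-X vectors available
 * (minus the shared TX/MCC vector). Each RX object gets its own EQ (with
 * adaptive coalescing enabled), CQ and RXQ; when more than one queue is
 * created, the non-default queues are placed in the RSS indirection table.
 */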
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues\n", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

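/* Legacy INTx handler. On Lancer the EQs are peeked directly for pending
 * entries; on BEx the CEV ISR register is read to find which EQs fired.
 * Returns IRQ_NONE when no event belongs to this function so a shared
 * interrupt line can be passed on.
 */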
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx) & isr)
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx) & isr)
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

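/* NAPI RX poll handler: consumes up to budget completions, handing
 * error-free TCP frames to GRO and everything else to the regular receive
 * path. Flush completions (num_rcvd or pkt_size of 0) are discarded. The
 * ring is replenished below the watermark and the CQ is re-armed only when
 * all work is done.
 */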
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

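/* Read the UE (unrecoverable error) status CSRs over PCI config space,
 * mask out the bits the masks say to ignore, and log the name of every
 * hardware block that reports an error. Any unmasked bit marks the
 * adapter as failed (ue_detected/eeh_err).
 */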
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & ~ue_status_lo_mask);
	ue_status_hi = (ue_status_hi & ~ue_status_hi_mask);

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

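/* Periodic housekeeping, rescheduled every second: UE detection on
 * non-Lancer chips, firmware stats refresh, TX/RX rate and RX EQ-delay
 * updates, and replenishing RX rings that starved under memory pressure.
 * While the interface is down, only pending MCC completions are reaped.
 */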
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}
	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

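/* Request one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector. If the full request fails, pci_enable_msix() returns the number
 * of vectors that could be allocated, and the request is retried with
 * that count as long as it covers the minimum of one RX + one TX vector.
 */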
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

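/* ndo_stop handler: disables async MCC and interrupts, quiesces NAPI,
 * synchronizes and releases the IRQs, and finally drains pending TX
 * completions so that every outstanding tx skb is freed before return.
 */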
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

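/* Create the function's interface (with RSS and promiscuous capabilities
 * on the PF) plus one restricted interface per VF when SR-IOV is enabled,
 * then the TX, RX and MCC queues. On a VF, the MAC address assigned by
 * the PF is queried and adopted. Failures unwind in reverse order.
 */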
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

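/* Flash each firmware component listed in the per-generation layout table.
 * NCSI images are skipped on firmware older than 3.102.148.0, and the
 * redboot image is only rewritten when its CRC differs from what is
 * already in flash. Each image is streamed in 32KB chunks: intermediate
 * chunks use FLASHROM_OPER_SAVE and the final chunk FLASHROM_OPER_FLASH.
 */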
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

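/* Map the BARs this driver uses. Lancer needs only a doorbell mapping
 * (BAR 0). On BEx, the PF maps the CSR and pcicfg BARs as well; the pcicfg
 * and doorbell BAR numbers differ between GEN2 and GEN3, and a GEN3 VF
 * reaches its pcicfg space at an offset inside the doorbell BAR instead.
 */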
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}

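/* Map the PCI BARs and allocate the DMA buffers used for control-path
 * commands: the mailbox (over-allocated by 16 bytes so the used portion
 * can be 16-byte aligned) and the multicast config command buffer. Also
 * initializes the mbox/MCC locks and the flash completion.
 */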
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					mc_cmd_mem->size, &mc_cmd_mem->dma,
					GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

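/* Poll the Lancer SLIPORT_STATUS register for the ready bit, sleeping
 * 20ms between reads for up to 500 iterations (roughly 10 seconds).
 */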
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

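/* Probe sequence: PCI bring-up, chip family check, 64-bit DMA mask with
 * 32-bit fallback, SR-IOV enable, control-path init, firmware handshake
 * (Lancer ready state or POST on the PF, then fw_init and a function
 * reset), stats/config query, MSI-X enable, queue setup and netdev
 * registration. The error labels unwind each step in reverse order.
 */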
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

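/* PCI EEH (error recovery) handlers: on error the device is detached and
 * torn down (and disconnected on permanent failure); slot reset re-enables
 * the device and re-runs POST; resume re-inits firmware and the queues,
 * then reopens the interface if it was running.
 */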
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);