be2net: cleanup and refactor stats code
drivers/net/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
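
/*
 * Note: be_queue_alloc()/be_queue_free() manage the DMA-coherent backing
 * store of a ring: len entries of entry_size bytes each, zeroed on
 * allocation along with the be_queue_info descriptor itself.
 */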

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
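
/*
 * Note: the *_notify() helpers below ring doorbell registers to tell the
 * HW how many entries were posted (RQ/TXQ) or popped (EQ/CQ).  The wmb()
 * in the RQ/TXQ variants orders the descriptor writes ahead of the
 * doorbell write so the HW never fetches a stale ring entry.
 */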

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
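
/*
 * Note: HW statistics arrive in generation-specific layouts: v0 on BE2,
 * v1 on BE3, and a per-physical-port ("pport") command on Lancer.  The
 * populate_*_stats() helpers below normalize each layout into the common
 * struct be_drv_stats so the rest of the driver stays layout-agnostic.
 */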

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}
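
/*
 * Note: netdev_stats_update() folds the per-queue SW counters and the
 * parsed HW counters into struct net_device_stats; rx_errors is the sum
 * of every "bad packet" class the port tracks.
 */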

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		drops += rx_stats(rxo)->rx_drops_no_skbs;
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->tx_pkts;
		bytes += tx_stats(txo)->tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
}
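
/*
 * Note: a TX request needs one header WRB, one WRB for the linear part
 * of the skb (if any), and one per page fragment; pre-Lancer chips also
 * require an even WRB count per request, hence the dummy pad WRB.
 * Example: a 3-fragment skb with linear data takes 1 + 1 + 3 = 5 WRBs,
 * padded to 6 on BE2/BE3.
 */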

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
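
/*
 * Note: make_tx_wrbs() DMA-maps the skb (linear head first, then each
 * page fragment) and fills one LE-converted WRB per mapping.  On a
 * mapping failure the dma_err path rewinds txq->head and unmaps whatever
 * was already posted, returning 0 so the caller drops the skb.
 */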

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
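
/*
 * Note: the ndo_set_vf_*() callbacks below are honoured only when SR-IOV
 * is enabled, and each validates the VF index against num_vfs before
 * issuing FW commands on the VF's behalf; the "vf + 1" argument appears
 * to select the VF's function domain in FW (domain 0 being the PF).
 */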

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
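
/*
 * Note: be_rx_eqd_update() implements adaptive interrupt coalescing.
 * Once per second it recomputes the RX packet rate and derives the EQ
 * delay as eqd = (rx_pps / 110000) << 3, clamped to [min_eqd, max_eqd],
 * with anything below 10 treated as "no moderation".  Example: under
 * ~220K pps the computed value is at most 8, so it is forced to 0 and
 * interrupts stay unmoderated until the rate roughly doubles.
 */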

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	stats->rx_pps = (stats->rx_pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = stats->rx_pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
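
/*
 * Note: RX buffers are rx_frag_size slices of a larger DMA-mapped "big
 * page".  Every slice holds a page reference, but only the slice flagged
 * last_page_user unmaps the page, so the mapping stays live until the
 * last slice is consumed here.
 */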

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
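
/*
 * Note: RX completions come in two layouts: v0, and v1 on BE3-native
 * functions.  The two parsers below extract the same set of fields from
 * either layout into the layout-independent struct be_rx_compl_info.
 */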

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
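
/*
 * Note: with the default rx_frag_size of 2048 and 4K pages,
 * big_page_size works out to PAGE_SIZE, so each page is split into two
 * RX fragments; the last fragment carved from a page is flagged
 * last_page_user so get_rx_page_info() knows when to unmap it.
 */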

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
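
/*
 * Note: EQ entries only say "this queue has work".  event_handle()
 * drains them, re-arms the EQ through the doorbell (always re-arming on
 * a spurious, event-less interrupt) and schedules NAPI; be_eq_clean()
 * drains without processing, for queue teardown.
 */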

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
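
/*
 * Note: the MCC rings carry FW management commands and completions.  The
 * MCC CQ deliberately shares the TX event queue, which is why these
 * rings may only be created after the TX queues exist.
 */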

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1598
3c8def97 1599/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1600static int be_tx_queues_create(struct be_adapter *adapter)
1601{
1602 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1603 struct be_tx_obj *txo;
1604 u8 i;
6b7c5b94
SP
1605
1606 adapter->tx_eq.max_eqd = 0;
1607 adapter->tx_eq.min_eqd = 0;
1608 adapter->tx_eq.cur_eqd = 96;
1609 adapter->tx_eq.enable_aic = false;
3c8def97 1610
6b7c5b94 1611 eq = &adapter->tx_eq.q;
3c8def97
SP
1612 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1613 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1614 return -1;
1615
8788fdc2 1616 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1617 goto err;
ecd62107 1618 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1619
1620 for_all_tx_queues(adapter, txo, i) {
1621 cq = &txo->cq;
1622 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1623 sizeof(struct be_eth_tx_compl)))
3c8def97 1624 goto err;
6b7c5b94 1625
1626 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1627 goto err;
6b7c5b94 1628
1629 q = &txo->q;
1630 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1631 sizeof(struct be_eth_wrb)))
1632 goto err;
6b7c5b94 1633
1634 if (be_cmd_txq_create(adapter, q, cq))
1635 goto err;
1636 }
1637 return 0;
1638
1639err:
1640 be_tx_queues_destroy(adapter);
1641 return -1;
1642}
1643
1644static void be_rx_queues_destroy(struct be_adapter *adapter)
1645{
1646 struct be_queue_info *q;
1647 struct be_rx_obj *rxo;
1648 int i;
1649
1650 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1651 be_queue_free(adapter, &rxo->q);
1652
1653 q = &rxo->cq;
1654 if (q->created)
1655 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1656 be_queue_free(adapter, q);
1657
3abcdeda 1658 q = &rxo->rx_eq.q;
482c9e79 1659 if (q->created)
3abcdeda 1660 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1661 be_queue_free(adapter, q);
6b7c5b94 1662 }
1663}
1664
1665static u32 be_num_rxqs_want(struct be_adapter *adapter)
1666{
c814fd36 1667 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1668 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1669 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1670 } else {
1671 dev_warn(&adapter->pdev->dev,
1672 "No support for multiple RX queues\n");
1673 return 1;
1674 }
1675}
1676
1677static int be_rx_queues_create(struct be_adapter *adapter)
1678{
1679 struct be_queue_info *eq, *q, *cq;
1680 struct be_rx_obj *rxo;
1681 int rc, i;
6b7c5b94 1682
1683 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1684 msix_enabled(adapter) ?
1685 adapter->num_msix_vec - 1 : 1);
1686 if (adapter->num_rx_qs != MAX_RX_QS)
1687 dev_warn(&adapter->pdev->dev,
1688 "Can create only %d RX queues", adapter->num_rx_qs);
1689
6b7c5b94 1690 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1691 for_all_rx_queues(adapter, rxo, i) {
1692 rxo->adapter = adapter;
1693 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1694 rxo->rx_eq.enable_aic = true;
1695
1696 /* EQ */
1697 eq = &rxo->rx_eq.q;
1698 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1699 sizeof(struct be_eq_entry));
1700 if (rc)
1701 goto err;
1702
1703 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1704 if (rc)
1705 goto err;
1706
ecd62107 1707 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1708
1709 /* CQ */
1710 cq = &rxo->cq;
1711 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1712 sizeof(struct be_eth_rx_compl));
1713 if (rc)
1714 goto err;
1715
1716 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1717 if (rc)
1718 goto err;
1719
1720 /* Rx Q - will be created in be_open() */
1721 q = &rxo->q;
1722 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1723 sizeof(struct be_eth_rx_d));
1724 if (rc)
1725 goto err;
1726
3abcdeda 1727 }
1728
1729 return 0;
1730err:
1731 be_rx_queues_destroy(adapter);
1732 return -1;
6b7c5b94 1733}
6b7c5b94 1734
fe6d2a38 1735static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1736{
1737 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1738 if (!eqe->evt)
1739 return false;
1740 else
1741 return true;
1742}
1743
1744static irqreturn_t be_intx(int irq, void *dev)
1745{
1746 struct be_adapter *adapter = dev;
3abcdeda 1747 struct be_rx_obj *rxo;
fe6d2a38 1748 int isr, i, tx = 0, rx = 0;
6b7c5b94 1749
1750 if (lancer_chip(adapter)) {
1751 if (event_peek(&adapter->tx_eq))
3c8def97 1752 tx = event_handle(adapter, &adapter->tx_eq, false);
1753 for_all_rx_queues(adapter, rxo, i) {
1754 if (event_peek(&rxo->rx_eq))
3c8def97 1755 rx |= event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1756 }
6b7c5b94 1757
1758 if (!(tx || rx))
1759 return IRQ_NONE;
3abcdeda 1760
1761 } else {
1762 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1763 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1764 if (!isr)
1765 return IRQ_NONE;
1766
ecd62107 1767 if ((1 << adapter->tx_eq.eq_idx & isr))
3c8def97 1768 event_handle(adapter, &adapter->tx_eq, false);
1769
1770 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1771 if ((1 << rxo->rx_eq.eq_idx & isr))
3c8def97 1772 event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1773 }
3abcdeda 1774 }
c001c213 1775
8788fdc2 1776 return IRQ_HANDLED;
1777}
1778
1779static irqreturn_t be_msix_rx(int irq, void *dev)
1780{
1781 struct be_rx_obj *rxo = dev;
1782 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1783
3c8def97 1784 event_handle(adapter, &rxo->rx_eq, true);
1785
1786 return IRQ_HANDLED;
1787}
1788
5fb379ee 1789static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1790{
1791 struct be_adapter *adapter = dev;
1792
3c8def97 1793 event_handle(adapter, &adapter->tx_eq, false);
1794
1795 return IRQ_HANDLED;
1796}
1797
2e588f84 1798static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1799{
2e588f84 1800 return rxcp->tcpf && !rxcp->err;
1801}
1802
49b05221 1803static int be_poll_rx(struct napi_struct *napi, int budget)
1804{
1805 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1806 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1807 struct be_adapter *adapter = rxo->adapter;
1808 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1809 struct be_rx_compl_info *rxcp;
1810 u32 work_done;
1811
ac124ff9 1812 rx_stats(rxo)->rx_polls++;
6b7c5b94 1813 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1814 rxcp = be_rx_compl_get(rxo);
1815 if (!rxcp)
1816 break;
1817
e80d9da6 1818 /* Ignore flush completions */
009dd872 1819 if (rxcp->num_rcvd && rxcp->pkt_size) {
2e588f84 1820 if (do_gro(rxcp))
1821 be_rx_compl_process_gro(adapter, rxo, rxcp);
1822 else
1823 be_rx_compl_process(adapter, rxo, rxcp);
1824 } else if (rxcp->pkt_size == 0) {
1825 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1826 }
009dd872 1827
2e588f84 1828 be_rx_stats_update(rxo, rxcp);
1829 }
1830
6b7c5b94 1831 /* Refill the queue */
3abcdeda 1832 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1833 be_post_rx_frags(rxo, GFP_ATOMIC);
1834
1835 /* All consumed */
1836 if (work_done < budget) {
1837 napi_complete(napi);
8788fdc2 1838 be_cq_notify(adapter, rx_cq->id, true, work_done);
1839 } else {
1840 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1841 be_cq_notify(adapter, rx_cq->id, false, work_done);
1842 }
1843 return work_done;
1844}
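/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the NAPI budget contract followed by be_poll_rx()
 * above: consume at most 'budget' completions; if the ring drains first,
 * complete NAPI and re-arm the CQ, otherwise stay in polling mode.
 */
#if 0
#include <stdio.h>

static int pending = 300;	/* pretend 300 completions are queued */

static int example_get_compl(void)
{
	if (!pending)
		return 0;
	pending--;
	return 1;
}

static int example_poll(int budget)
{
	int work_done;

	for (work_done = 0; work_done < budget; work_done++)
		if (!example_get_compl())
			break;	/* ring drained before the budget ran out */

	if (work_done < budget)
		printf("complete + re-arm after %d\n", work_done);
	else
		printf("budget spent at %d, poll again\n", work_done);
	return work_done;
}

int main(void)
{
	while (example_poll(64) == 64)	/* 64 is a typical NAPI weight */
		;
	return 0;
}
#endif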
1845
 1846/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1847 * For TX/MCC we don't honour budget; consume everything
1848 */
1849static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1850{
1851 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1852 struct be_adapter *adapter =
1853 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1854 struct be_tx_obj *txo;
6b7c5b94 1855 struct be_eth_tx_compl *txcp;
1856 int tx_compl, mcc_compl, status = 0;
1857 u8 i;
1858 u16 num_wrbs;
1859
1860 for_all_tx_queues(adapter, txo, i) {
1861 tx_compl = 0;
1862 num_wrbs = 0;
1863 while ((txcp = be_tx_compl_get(&txo->cq))) {
1864 num_wrbs += be_tx_compl_process(adapter, txo,
1865 AMAP_GET_BITS(struct amap_eth_tx_compl,
1866 wrb_index, txcp));
1867 tx_compl++;
1868 }
1869 if (tx_compl) {
1870 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1871
1872 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1873
1874 /* As Tx wrbs have been freed up, wake up netdev queue
1875 * if it was stopped due to lack of tx wrbs. */
1876 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1877 atomic_read(&txo->q.used) < txo->q.len / 2) {
1878 netif_wake_subqueue(adapter->netdev, i);
1879 }
1880
1881 adapter->drv_stats.tx_events++;
1882 tx_stats(txo)->tx_compl += tx_compl;
3c8def97 1883 }
1884 }
1885
1886 mcc_compl = be_process_mcc(adapter, &status);
1887
1888 if (mcc_compl) {
1889 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1890 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1891 }
1892
3c8def97 1893 napi_complete(napi);
6b7c5b94 1894
3c8def97 1895 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1896 return 1;
1897}
1898
d053de91 1899void be_detect_dump_ue(struct be_adapter *adapter)
1900{
1901 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1902 u32 i;
1903
1904 pci_read_config_dword(adapter->pdev,
1905 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1906 pci_read_config_dword(adapter->pdev,
1907 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1908 pci_read_config_dword(adapter->pdev,
1909 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1910 pci_read_config_dword(adapter->pdev,
1911 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1912
1913 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1914 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1915
1916 if (ue_status_lo || ue_status_hi) {
1917 adapter->ue_detected = true;
7acc2087 1918 adapter->eeh_err = true;
1919 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1920 }
1921
1922 if (ue_status_lo) {
1923 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1924 if (ue_status_lo & 1)
1925 dev_err(&adapter->pdev->dev,
1926 "UE: %s bit set\n", ue_status_low_desc[i]);
1927 }
1928 }
1929 if (ue_status_hi) {
1930 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1931 if (ue_status_hi & 1)
1932 dev_err(&adapter->pdev->dev,
1933 "UE: %s bit set\n", ue_status_hi_desc[i]);
1934 }
1935 }
1936
1937}
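/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the shift-and-test decode used by
 * be_detect_dump_ue() above against the ue_status_low_desc[] and
 * ue_status_hi_desc[] tables earlier in this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static const char * const example_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

int main(void)
{
	uint32_t status = 0x5;	/* bits 0 and 2 survive the mask */
	unsigned int i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			printf("UE: %s bit set\n", example_desc[i]);
	/* prints "UE: CEV bit set" then "UE: DBUF bit set" */
	return 0;
}
#endif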
1938
1939static void be_worker(struct work_struct *work)
1940{
1941 struct be_adapter *adapter =
1942 container_of(work, struct be_adapter, work.work);
1943 struct be_rx_obj *rxo;
1944 int i;
ea1dae11 1945
1946 if (!adapter->ue_detected && !lancer_chip(adapter))
1947 be_detect_dump_ue(adapter);
1948
1949 /* when interrupts are not yet enabled, just reap any pending
1950 * mcc completions */
1951 if (!netif_running(adapter->netdev)) {
1952 int mcc_compl, status = 0;
1953
1954 mcc_compl = be_process_mcc(adapter, &status);
1955
1956 if (mcc_compl) {
1957 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1958 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1959 }
9b037f38 1960
1961 goto reschedule;
1962 }
1963
1964 if (!adapter->stats_cmd_sent) {
1965 if (lancer_chip(adapter))
1966 lancer_cmd_get_pport_stats(adapter,
1967 &adapter->stats_cmd);
1968 else
1969 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1970 }
3c8def97 1971
3abcdeda 1972 for_all_rx_queues(adapter, rxo, i) {
1973 be_rx_eqd_update(adapter, rxo);
1974
1975 if (rxo->rx_post_starved) {
1976 rxo->rx_post_starved = false;
1829b086 1977 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 1978 }
1979 }
1980
f203af70 1981reschedule:
e74fbd03 1982 adapter->work_counter++;
1983 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1984}
1985
1986static void be_msix_disable(struct be_adapter *adapter)
1987{
ac6a0c4a 1988 if (msix_enabled(adapter)) {
8d56ff11 1989 pci_disable_msix(adapter->pdev);
ac6a0c4a 1990 adapter->num_msix_vec = 0;
1991 }
1992}
1993
1994static void be_msix_enable(struct be_adapter *adapter)
1995{
3abcdeda 1996#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 1997 int i, status, num_vec;
6b7c5b94 1998
ac6a0c4a 1999 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2000
ac6a0c4a 2001 for (i = 0; i < num_vec; i++)
2002 adapter->msix_entries[i].entry = i;
2003
ac6a0c4a 2004 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2005 if (status == 0) {
2006 goto done;
2007 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2008 num_vec = status;
3abcdeda 2009 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2010 num_vec) == 0)
3abcdeda 2011 goto done;
2012 }
2013 return;
2014done:
2015 adapter->num_msix_vec = num_vec;
2016 return;
2017}
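/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the retry convention used by be_msix_enable()
 * above. At this kernel vintage pci_enable_msix() returns 0 on success,
 * a positive count of available vectors when the request was too large,
 * or a negative errno; the driver retries with the offered count.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_MIN_VECTORS 2	/* Rx + Tx, as BE_MIN_MSIX_VECTORS above */

/* Stand-in for pci_enable_msix(): 0 on success, else what would fit. */
static int example_enable(int requested, int available)
{
	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int available = 4, num_vec = 9;	/* want 8 rx vectors + 1 tx/mcc */
	int status = example_enable(num_vec, available);

	if (status == 0) {
		printf("got all %d vectors\n", num_vec);
	} else if (status >= EXAMPLE_MIN_VECTORS) {
		num_vec = status;	/* retry with what the bus offers */
		if (example_enable(num_vec, available) == 0)
			printf("fell back to %d vectors\n", num_vec);
	}
	return 0;
}
#endif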
2018
2019static void be_sriov_enable(struct be_adapter *adapter)
2020{
344dbf10 2021 be_check_sriov_fn_type(adapter);
6dedec81 2022#ifdef CONFIG_PCI_IOV
ba343c77 2023 if (be_physfn(adapter) && num_vfs) {
2024 int status, pos;
2025 u16 nvfs;
2026
2027 pos = pci_find_ext_capability(adapter->pdev,
2028 PCI_EXT_CAP_ID_SRIOV);
2029 pci_read_config_word(adapter->pdev,
2030 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2031
2032 if (num_vfs > nvfs) {
2033 dev_info(&adapter->pdev->dev,
2034 "Device supports %d VFs and not %d\n",
2035 nvfs, num_vfs);
2036 num_vfs = nvfs;
2037 }
6dedec81 2038
2039 status = pci_enable_sriov(adapter->pdev, num_vfs);
2040 adapter->sriov_enabled = status ? false : true;
2041 }
2042#endif
2043}
2044
2045static void be_sriov_disable(struct be_adapter *adapter)
2046{
2047#ifdef CONFIG_PCI_IOV
2048 if (adapter->sriov_enabled) {
2049 pci_disable_sriov(adapter->pdev);
2050 adapter->sriov_enabled = false;
2051 }
2052#endif
2053}
2054
2055static inline int be_msix_vec_get(struct be_adapter *adapter,
2056 struct be_eq_obj *eq_obj)
6b7c5b94 2057{
ecd62107 2058 return adapter->msix_entries[eq_obj->eq_idx].vector;
2059}
2060
2061static int be_request_irq(struct be_adapter *adapter,
2062 struct be_eq_obj *eq_obj,
3abcdeda 2063 void *handler, char *desc, void *context)
6b7c5b94
SP
2064{
2065 struct net_device *netdev = adapter->netdev;
2066 int vec;
2067
2068 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2069 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2070 return request_irq(vec, handler, 0, eq_obj->desc, context);
2071}
2072
2073static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2074 void *context)
b628bde2 2075{
fe6d2a38 2076 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2077 free_irq(vec, context);
b628bde2 2078}
6b7c5b94 2079
2080static int be_msix_register(struct be_adapter *adapter)
2081{
2082 struct be_rx_obj *rxo;
2083 int status, i;
2084 char qname[10];
b628bde2 2085
2086 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2087 adapter);
2088 if (status)
2089 goto err;
2090
2091 for_all_rx_queues(adapter, rxo, i) {
2092 sprintf(qname, "rxq%d", i);
2093 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2094 qname, rxo);
2095 if (status)
2096 goto err_msix;
2097 }
b628bde2 2098
6b7c5b94 2099 return 0;
b628bde2 2100
2101err_msix:
2102 be_free_irq(adapter, &adapter->tx_eq, adapter);
2103
2104 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2105 be_free_irq(adapter, &rxo->rx_eq, rxo);
2106
2107err:
2108 dev_warn(&adapter->pdev->dev,
2109 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2110 be_msix_disable(adapter);
2111 return status;
2112}
2113
2114static int be_irq_register(struct be_adapter *adapter)
2115{
2116 struct net_device *netdev = adapter->netdev;
2117 int status;
2118
ac6a0c4a 2119 if (msix_enabled(adapter)) {
2120 status = be_msix_register(adapter);
2121 if (status == 0)
2122 goto done;
2123 /* INTx is not supported for VF */
2124 if (!be_physfn(adapter))
2125 return status;
2126 }
2127
2128 /* INTx */
2129 netdev->irq = adapter->pdev->irq;
2130 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2131 adapter);
2132 if (status) {
2133 dev_err(&adapter->pdev->dev,
2134 "INTx request IRQ failed - err %d\n", status);
2135 return status;
2136 }
2137done:
2138 adapter->isr_registered = true;
2139 return 0;
2140}
2141
2142static void be_irq_unregister(struct be_adapter *adapter)
2143{
2144 struct net_device *netdev = adapter->netdev;
2145 struct be_rx_obj *rxo;
2146 int i;
2147
2148 if (!adapter->isr_registered)
2149 return;
2150
2151 /* INTx */
ac6a0c4a 2152 if (!msix_enabled(adapter)) {
2153 free_irq(netdev->irq, adapter);
2154 goto done;
2155 }
2156
2157 /* MSIx */
2158 be_free_irq(adapter, &adapter->tx_eq, adapter);
2159
2160 for_all_rx_queues(adapter, rxo, i)
2161 be_free_irq(adapter, &rxo->rx_eq, rxo);
2162
2163done:
2164 adapter->isr_registered = false;
2165}
2166
2167static void be_rx_queues_clear(struct be_adapter *adapter)
2168{
2169 struct be_queue_info *q;
2170 struct be_rx_obj *rxo;
2171 int i;
2172
2173 for_all_rx_queues(adapter, rxo, i) {
2174 q = &rxo->q;
2175 if (q->created) {
2176 be_cmd_rxq_destroy(adapter, q);
2177 /* After the rxq is invalidated, wait for a grace time
2178 * of 1ms for all dma to end and the flush compl to
2179 * arrive
2180 */
2181 mdelay(1);
2182 be_rx_q_clean(adapter, rxo);
2183 }
2184
2185 /* Clear any residual events */
2186 q = &rxo->rx_eq.q;
2187 if (q->created)
2188 be_eq_clean(adapter, &rxo->rx_eq);
2189 }
2190}
2191
2192static int be_close(struct net_device *netdev)
2193{
2194 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2195 struct be_rx_obj *rxo;
3c8def97 2196 struct be_tx_obj *txo;
889cd4b2 2197 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2198 int vec, i;
889cd4b2 2199
2200 be_async_mcc_disable(adapter);
2201
2202 netif_carrier_off(netdev);
2203 adapter->link_up = false;
2204
2205 if (!lancer_chip(adapter))
2206 be_intr_set(adapter, false);
889cd4b2 2207
2208 for_all_rx_queues(adapter, rxo, i)
2209 napi_disable(&rxo->rx_eq.napi);
2210
2211 napi_disable(&tx_eq->napi);
2212
2213 if (lancer_chip(adapter)) {
2214 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2215 for_all_rx_queues(adapter, rxo, i)
2216 be_cq_notify(adapter, rxo->cq.id, false, 0);
2217 for_all_tx_queues(adapter, txo, i)
2218 be_cq_notify(adapter, txo->cq.id, false, 0);
2219 }
2220
ac6a0c4a 2221 if (msix_enabled(adapter)) {
fe6d2a38 2222 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2223 synchronize_irq(vec);
2224
2225 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2226 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2227 synchronize_irq(vec);
2228 }
2229 } else {
2230 synchronize_irq(netdev->irq);
2231 }
2232 be_irq_unregister(adapter);
2233
2234 /* Wait for all pending tx completions to arrive so that
2235 * all tx skbs are freed.
2236 */
2237 for_all_tx_queues(adapter, txo, i)
2238 be_tx_compl_clean(adapter, txo);
889cd4b2 2239
2240 be_rx_queues_clear(adapter);
2241 return 0;
2242}
2243
2244static int be_rx_queues_setup(struct be_adapter *adapter)
2245{
2246 struct be_rx_obj *rxo;
2247 int rc, i;
2248 u8 rsstable[MAX_RSS_QS];
2249
2250 for_all_rx_queues(adapter, rxo, i) {
2251 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2252 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2253 adapter->if_handle,
2254 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2255 if (rc)
2256 return rc;
2257 }
2258
2259 if (be_multi_rxq(adapter)) {
2260 for_all_rss_queues(adapter, rxo, i)
2261 rsstable[i] = rxo->rss_id;
2262
2263 rc = be_cmd_rss_config(adapter, rsstable,
2264 adapter->num_rx_qs - 1);
2265 if (rc)
2266 return rc;
2267 }
2268
2269 /* First time posting */
2270 for_all_rx_queues(adapter, rxo, i) {
2271 be_post_rx_frags(rxo, GFP_KERNEL);
2272 napi_enable(&rxo->rx_eq.napi);
2273 }
2274 return 0;
2275}
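/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the RSS indirection table built above: the default
 * queue (created with rss enable == 0) is excluded, and each remaining
 * queue contributes the rss_id the firmware returned at rxq-create time.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rss_id[] = { 7, 9, 12, 13 };	/* fake firmware-assigned ids */
	uint8_t rsstable[4];
	int i;

	for (i = 0; i < 4; i++)
		rsstable[i] = rss_id[i];	/* slot i -> RSS queue i */

	for (i = 0; i < 4; i++)
		printf("slot %d -> rss_id %u\n", i, (unsigned)rsstable[i]);
	return 0;
}
#endif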
2276
2277static int be_open(struct net_device *netdev)
2278{
2279 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2280 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2281 struct be_rx_obj *rxo;
a8f447bd 2282 bool link_up;
3abcdeda 2283 int status, i;
2284 u8 mac_speed;
2285 u16 link_speed;
5fb379ee 2286
2287 status = be_rx_queues_setup(adapter);
2288 if (status)
2289 goto err;
2290
2291 napi_enable(&tx_eq->napi);
2292
2293 be_irq_register(adapter);
2294
2295 if (!lancer_chip(adapter))
2296 be_intr_set(adapter, true);
2297
2298 /* The evt queues are created in unarmed state; arm them */
2299 for_all_rx_queues(adapter, rxo, i) {
2300 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2301 be_cq_notify(adapter, rxo->cq.id, true, 0);
2302 }
8788fdc2 2303 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2304
2305 /* Now that interrupts are on we can process async mcc */
2306 be_async_mcc_enable(adapter);
2307
0388f251 2308 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
187e8756 2309 &link_speed, 0);
a8f447bd 2310 if (status)
889cd4b2 2311 goto err;
a8f447bd 2312 be_link_status_update(adapter, link_up);
5fb379ee 2313
889cd4b2 2314 if (be_physfn(adapter)) {
1da87b7f 2315 status = be_vid_config(adapter, false, 0);
2316 if (status)
2317 goto err;
4f2aa89c 2318
2319 status = be_cmd_set_flow_control(adapter,
2320 adapter->tx_fc, adapter->rx_fc);
2321 if (status)
889cd4b2 2322 goto err;
ba343c77 2323 }
4f2aa89c 2324
2325 return 0;
2326err:
2327 be_close(adapter->netdev);
2328 return -EIO;
5fb379ee
SP
2329}
2330
2331static int be_setup_wol(struct be_adapter *adapter, bool enable)
2332{
2333 struct be_dma_mem cmd;
2334 int status = 0;
2335 u8 mac[ETH_ALEN];
2336
2337 memset(mac, 0, ETH_ALEN);
2338
2339 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2340 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2341 GFP_KERNEL);
2342 if (cmd.va == NULL)
2343 return -1;
2344 memset(cmd.va, 0, cmd.size);
2345
2346 if (enable) {
2347 status = pci_write_config_dword(adapter->pdev,
2348 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2349 if (status) {
2350 dev_err(&adapter->pdev->dev,
2381a55c 2351 "Could not enable Wake-on-lan\n");
2352 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2353 cmd.dma);
2354 return status;
2355 }
2356 status = be_cmd_enable_magic_wol(adapter,
2357 adapter->netdev->dev_addr, &cmd);
2358 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2359 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2360 } else {
2361 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2362 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2363 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2364 }
2365
2b7bcebf 2366 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2367 return status;
2368}
2369
2370/*
2371 * Generate a seed MAC address from the PF MAC Address using jhash.
 2372 * MAC addresses for VFs are assigned incrementally starting from the seed.
2373 * These addresses are programmed in the ASIC by the PF and the VF driver
2374 * queries for the MAC address during its probe.
2375 */
2376static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2377{
2378 u32 vf = 0;
3abcdeda 2379 int status = 0;
2380 u8 mac[ETH_ALEN];
2381
2382 be_vf_eth_addr_generate(adapter, mac);
2383
2384 for (vf = 0; vf < num_vfs; vf++) {
2385 status = be_cmd_pmac_add(adapter, mac,
2386 adapter->vf_cfg[vf].vf_if_handle,
2387 &adapter->vf_cfg[vf].vf_pmac_id,
2388 vf + 1);
2389 if (status)
2390 dev_err(&adapter->pdev->dev,
2391 "Mac address add failed for VF %d\n", vf);
2392 else
2393 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2394
2395 mac[5] += 1;
2396 }
2397 return status;
2398}
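/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the incremental assignment described in the
 * comment above. be_vf_eth_addr_generate() (defined elsewhere) derives the
 * seed from the PF MAC using jhash; the fixed bytes below merely fake such
 * a seed to show the mac[5] += 1 progression across VFs.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x10, 0x18, 0xaa, 0xbb, 0x40 }; /* fake seed */
	int vf, num_vfs = 3;

	for (vf = 0; vf < num_vfs; vf++) {
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
			mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* next VF gets the next address */
	}
	return 0;
}
#endif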
2399
2400static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2401{
2402 u32 vf;
2403
2404 for (vf = 0; vf < num_vfs; vf++) {
2405 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2406 be_cmd_pmac_del(adapter,
2407 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2408 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2409 }
2410}
2411
2412static int be_setup(struct be_adapter *adapter)
2413{
5fb379ee 2414 struct net_device *netdev = adapter->netdev;
ba343c77 2415 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2416 int status;
2417 u8 mac[ETH_ALEN];
2418
2419 be_cmd_req_native_mode(adapter);
2420
2421 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2422 BE_IF_FLAGS_BROADCAST |
2423 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2424
2425 if (be_physfn(adapter)) {
2426 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2427 BE_IF_FLAGS_PROMISCUOUS |
2428 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2429 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2430
ac6a0c4a 2431 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2432 cap_flags |= BE_IF_FLAGS_RSS;
2433 en_flags |= BE_IF_FLAGS_RSS;
2434 }
ba343c77 2435 }
2436
2437 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2438 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2439 &adapter->if_handle, &adapter->pmac_id, 0);
2440 if (status != 0)
2441 goto do_none;
2442
ba343c77 2443 if (be_physfn(adapter)) {
c99ac3e7
AK
2444 if (adapter->sriov_enabled) {
2445 while (vf < num_vfs) {
2446 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2447 BE_IF_FLAGS_BROADCAST;
2448 status = be_cmd_if_create(adapter, cap_flags,
2449 en_flags, mac, true,
64600ea5 2450 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2451 NULL, vf+1);
2452 if (status) {
2453 dev_err(&adapter->pdev->dev,
2454 "Interface Create failed for VF %d\n",
2455 vf);
2456 goto if_destroy;
2457 }
2458 adapter->vf_cfg[vf].vf_pmac_id =
2459 BE_INVALID_PMAC_ID;
2460 vf++;
ba343c77 2461 }
84e5b9f7 2462 }
c99ac3e7 2463 } else {
2464 status = be_cmd_mac_addr_query(adapter, mac,
2465 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2466 if (!status) {
2467 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2468 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2469 }
2470 }
2471
2472 status = be_tx_queues_create(adapter);
2473 if (status != 0)
2474 goto if_destroy;
2475
2476 status = be_rx_queues_create(adapter);
2477 if (status != 0)
2478 goto tx_qs_destroy;
2479
2480 /* Allow all priorities by default. A GRP5 evt may modify this */
2481 adapter->vlan_prio_bmap = 0xff;
2482
2483 status = be_mcc_queues_create(adapter);
2484 if (status != 0)
2485 goto rx_qs_destroy;
6b7c5b94 2486
2487 adapter->link_speed = -1;
2488
2489 return 0;
2490
2491rx_qs_destroy:
2492 be_rx_queues_destroy(adapter);
2493tx_qs_destroy:
2494 be_tx_queues_destroy(adapter);
2495if_destroy:
2496 if (be_physfn(adapter) && adapter->sriov_enabled)
2497 for (vf = 0; vf < num_vfs; vf++)
2498 if (adapter->vf_cfg[vf].vf_if_handle)
2499 be_cmd_if_destroy(adapter,
2500 adapter->vf_cfg[vf].vf_if_handle,
2501 vf + 1);
2502 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2503do_none:
2504 return status;
2505}
2506
2507static int be_clear(struct be_adapter *adapter)
2508{
2509 int vf;
2510
c99ac3e7 2511 if (be_physfn(adapter) && adapter->sriov_enabled)
2512 be_vf_eth_addr_rem(adapter);
2513
1a8887d8 2514 be_mcc_queues_destroy(adapter);
2515 be_rx_queues_destroy(adapter);
2516 be_tx_queues_destroy(adapter);
1f5db833 2517 adapter->eq_next_idx = 0;
5fb379ee 2518
2519 if (be_physfn(adapter) && adapter->sriov_enabled)
2520 for (vf = 0; vf < num_vfs; vf++)
2521 if (adapter->vf_cfg[vf].vf_if_handle)
2522 be_cmd_if_destroy(adapter,
2523 adapter->vf_cfg[vf].vf_if_handle,
2524 vf + 1);
2525
658681f7 2526 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2527
2528 adapter->be3_native = 0;
2529
2530 /* tell fw we're done with firing cmds */
2531 be_cmd_fw_clean(adapter);
2532 return 0;
2533}
2534
6b7c5b94 2535
84517482 2536#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2537static bool be_flash_redboot(struct be_adapter *adapter,
2538 const u8 *p, u32 img_start, int image_size,
2539 int hdr_size)
2540{
2541 u32 crc_offset;
2542 u8 flashed_crc[4];
2543 int status;
2544
2545 crc_offset = hdr_size + img_start + image_size - 4;
2546
fa9a6fed 2547 p += crc_offset;
2548
2549 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2550 (image_size - 4));
2551 if (status) {
2552 dev_err(&adapter->pdev->dev,
2553 "could not get crc from flash, not flashing redboot\n");
2554 return false;
2555 }
2556
 2557 /* update redboot only if CRC does not match */
2558 if (!memcmp(flashed_crc, p, 4))
2559 return false;
2560 else
2561 return true;
2562}
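/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of where be_flash_redboot() looks for the on-file
 * CRC: the last 4 bytes of the component image, i.e. file header size +
 * component start + component size - 4.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t hdr_size = 32, img_start = 0x20000, image_size = 0x40000;
	uint32_t crc_offset = hdr_size + img_start + image_size - 4;

	uint8_t file_crc[4] = { 0xde, 0xad, 0xbe, 0xef };	/* fake CRCs */
	uint8_t flash_crc[4] = { 0xde, 0xad, 0xbe, 0xef };

	printf("crc lives at file offset 0x%x\n", (unsigned)crc_offset);
	/* identical CRCs -> image unchanged -> skip reflashing redboot */
	printf("flash redboot: %s\n",
		memcmp(file_crc, flash_crc, 4) ? "yes" : "no");
	return 0;
}
#endif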
2563
3f0d4560 2564static int be_flash_data(struct be_adapter *adapter,
84517482 2565 const struct firmware *fw,
2566 struct be_dma_mem *flash_cmd, int num_of_images)
2567
84517482 2568{
2569 int status = 0, i, filehdr_size = 0;
2570 u32 total_bytes = 0, flash_op;
2571 int num_bytes;
2572 const u8 *p = fw->data;
2573 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2574 const struct flash_comp *pflashcomp;
9fe96934 2575 int num_comp;
3f0d4560 2576
215faf9c 2577 static const struct flash_comp gen3_flash_types[9] = {
2578 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2579 FLASH_IMAGE_MAX_SIZE_g3},
2580 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2581 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2582 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2583 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2584 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2585 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2586 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2587 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2588 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2589 FLASH_IMAGE_MAX_SIZE_g3},
2590 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2591 FLASH_IMAGE_MAX_SIZE_g3},
2592 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2593 FLASH_IMAGE_MAX_SIZE_g3},
2594 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2595 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2596 };
215faf9c 2597 static const struct flash_comp gen2_flash_types[8] = {
2598 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2599 FLASH_IMAGE_MAX_SIZE_g2},
2600 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2601 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2602 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2603 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2604 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2605 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2606 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2607 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2608 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2609 FLASH_IMAGE_MAX_SIZE_g2},
2610 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2611 FLASH_IMAGE_MAX_SIZE_g2},
2612 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2613 FLASH_IMAGE_MAX_SIZE_g2}
2614 };
2615
2616 if (adapter->generation == BE_GEN3) {
2617 pflashcomp = gen3_flash_types;
2618 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2619 num_comp = ARRAY_SIZE(gen3_flash_types);
2620 } else {
2621 pflashcomp = gen2_flash_types;
2622 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2623 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2624 }
2625 for (i = 0; i < num_comp; i++) {
2626 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2627 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2628 continue;
2629 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2630 (!be_flash_redboot(adapter, fw->data,
2631 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2632 (num_of_images * sizeof(struct image_hdr)))))
2633 continue;
2634 p = fw->data;
2635 p += filehdr_size + pflashcomp[i].offset
2636 + (num_of_images * sizeof(struct image_hdr));
2637 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2638 return -1;
2639 total_bytes = pflashcomp[i].size;
2640 while (total_bytes) {
2641 if (total_bytes > 32*1024)
2642 num_bytes = 32*1024;
2643 else
2644 num_bytes = total_bytes;
2645 total_bytes -= num_bytes;
2646
2647 if (!total_bytes)
2648 flash_op = FLASHROM_OPER_FLASH;
2649 else
2650 flash_op = FLASHROM_OPER_SAVE;
2651 memcpy(req->params.data_buf, p, num_bytes);
2652 p += num_bytes;
2653 status = be_cmd_write_flashrom(adapter, flash_cmd,
2654 pflashcomp[i].optype, flash_op, num_bytes);
2655 if (status) {
2656 dev_err(&adapter->pdev->dev,
2657 "cmd to write to flash rom failed.\n");
2658 return -1;
2659 }
84517482 2660 }
84517482 2661 }
2662 return 0;
2663}
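/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the chunking policy in be_flash_data() above:
 * every 32KB piece but the last is written with the SAVE opcode, and the
 * final piece uses FLASH so the firmware commits the component.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int total_bytes = 100 * 1024, num_bytes;

	while (total_bytes) {
		num_bytes = (total_bytes > 32 * 1024) ? 32 * 1024 : total_bytes;
		total_bytes -= num_bytes;
		printf("write %u bytes, op=%s\n", num_bytes,
			total_bytes ? "SAVE" : "FLASH");
	}
	/* 100KB -> three 32KB SAVE writes followed by one 4KB FLASH */
	return 0;
}
#endif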
2664
2665static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2666{
2667 if (fhdr == NULL)
2668 return 0;
2669 if (fhdr->build[0] == '3')
2670 return BE_GEN3;
2671 else if (fhdr->build[0] == '2')
2672 return BE_GEN2;
2673 else
2674 return 0;
2675}
2676
2677static int lancer_fw_download(struct be_adapter *adapter,
2678 const struct firmware *fw)
84517482 2679{
2680#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2681#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2682 struct be_dma_mem flash_cmd;
2683 const u8 *data_ptr = NULL;
2684 u8 *dest_image_ptr = NULL;
2685 size_t image_size = 0;
2686 u32 chunk_size = 0;
2687 u32 data_written = 0;
2688 u32 offset = 0;
2689 int status = 0;
2690 u8 add_status = 0;
84517482 2691
485bf569 2692 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2693 dev_err(&adapter->pdev->dev,
2694 "FW Image not properly aligned. "
2695 "Length must be 4 byte aligned.\n");
2696 status = -EINVAL;
2697 goto lancer_fw_exit;
2698 }
2699
2700 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2701 + LANCER_FW_DOWNLOAD_CHUNK;
2702 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2703 &flash_cmd.dma, GFP_KERNEL);
2704 if (!flash_cmd.va) {
2705 status = -ENOMEM;
2706 dev_err(&adapter->pdev->dev,
2707 "Memory allocation failure while flashing\n");
2708 goto lancer_fw_exit;
2709 }
84517482 2710
2711 dest_image_ptr = flash_cmd.va +
2712 sizeof(struct lancer_cmd_req_write_object);
2713 image_size = fw->size;
2714 data_ptr = fw->data;
2715
2716 while (image_size) {
2717 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2718
2719 /* Copy the image chunk content. */
2720 memcpy(dest_image_ptr, data_ptr, chunk_size);
2721
2722 status = lancer_cmd_write_object(adapter, &flash_cmd,
2723 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2724 &data_written, &add_status);
2725
2726 if (status)
2727 break;
2728
2729 offset += data_written;
2730 data_ptr += data_written;
2731 image_size -= data_written;
2732 }
2733
2734 if (!status) {
2735 /* Commit the FW written */
2736 status = lancer_cmd_write_object(adapter, &flash_cmd,
2737 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2738 &data_written, &add_status);
2739 }
2740
2741 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2742 flash_cmd.dma);
2743 if (status) {
2744 dev_err(&adapter->pdev->dev,
2745 "Firmware load error. "
2746 "Status code: 0x%x Additional Status: 0x%x\n",
2747 status, add_status);
2748 goto lancer_fw_exit;
2749 }
2750
2751 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2752lancer_fw_exit:
2753 return status;
2754}
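/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the download accounting above: the write command
 * reports how much the adapter actually consumed (data_written), the loop
 * advances by that amount rather than by the requested chunk size, and a
 * final zero-length write commits the image. The 24KB cap below is made up
 * purely to show a partial write.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_CHUNK (32 * 1024)

/* Stand-in for lancer_cmd_write_object(): accepts at most 24KB per call. */
static unsigned int example_write(unsigned int req)
{
	return req < 24 * 1024 ? req : 24 * 1024;
}

int main(void)
{
	unsigned int image_size = 64 * 1024, offset = 0, data_written;

	while (image_size) {
		unsigned int chunk =
			image_size < EXAMPLE_CHUNK ? image_size : EXAMPLE_CHUNK;

		data_written = example_write(chunk);
		offset += data_written;
		image_size -= data_written;
	}
	printf("commit at offset %u with a zero-length write\n", offset);
	return 0;
}
#endif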
2755
2756static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2757{
2758 struct flash_file_hdr_g2 *fhdr;
2759 struct flash_file_hdr_g3 *fhdr3;
2760 struct image_hdr *img_hdr_ptr = NULL;
2761 struct be_dma_mem flash_cmd;
2762 const u8 *p;
2763 int status = 0, i = 0, num_imgs = 0;
2764
2765 p = fw->data;
3f0d4560 2766 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2767
84517482 2768 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2769 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2770 &flash_cmd.dma, GFP_KERNEL);
2771 if (!flash_cmd.va) {
2772 status = -ENOMEM;
2773 dev_err(&adapter->pdev->dev,
2774 "Memory allocation failure while flashing\n");
485bf569 2775 goto be_fw_exit;
84517482
AK
2776 }
2777
2778 if ((adapter->generation == BE_GEN3) &&
2779 (get_ufigen_type(fhdr) == BE_GEN3)) {
2780 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2781 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2782 for (i = 0; i < num_imgs; i++) {
2783 img_hdr_ptr = (struct image_hdr *) (fw->data +
2784 (sizeof(struct flash_file_hdr_g3) +
2785 i * sizeof(struct image_hdr)));
2786 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2787 status = be_flash_data(adapter, fw, &flash_cmd,
2788 num_imgs);
2789 }
2790 } else if ((adapter->generation == BE_GEN2) &&
2791 (get_ufigen_type(fhdr) == BE_GEN2)) {
2792 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2793 } else {
2794 dev_err(&adapter->pdev->dev,
2795 "UFI and Interface are not compatible for flashing\n");
2796 status = -1;
2797 }
2798
2799 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2800 flash_cmd.dma);
2801 if (status) {
2802 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2803 goto be_fw_exit;
84517482
AK
2804 }
2805
af901ca1 2806 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2807
2808be_fw_exit:
2809 return status;
2810}
2811
2812int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2813{
2814 const struct firmware *fw;
2815 int status;
2816
2817 if (!netif_running(adapter->netdev)) {
2818 dev_err(&adapter->pdev->dev,
2819 "Firmware load not allowed (interface is down)\n");
2820 return -1;
2821 }
2822
2823 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2824 if (status)
2825 goto fw_exit;
2826
2827 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2828
2829 if (lancer_chip(adapter))
2830 status = lancer_fw_download(adapter, fw);
2831 else
2832 status = be_fw_download(adapter, fw);
2833
2834fw_exit:
2835 release_firmware(fw);
2836 return status;
2837}
2838
2839static struct net_device_ops be_netdev_ops = {
2840 .ndo_open = be_open,
2841 .ndo_stop = be_close,
2842 .ndo_start_xmit = be_xmit,
2843 .ndo_set_rx_mode = be_set_multicast_list,
2844 .ndo_set_mac_address = be_mac_addr_set,
2845 .ndo_change_mtu = be_change_mtu,
2846 .ndo_validate_addr = eth_validate_addr,
2847 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2848 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2849 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2850 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2851 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2852 .ndo_get_vf_config = be_get_vf_config
2853};
2854
2855static void be_netdev_init(struct net_device *netdev)
2856{
2857 struct be_adapter *adapter = netdev_priv(netdev);
2858 struct be_rx_obj *rxo;
2859 int i;
6b7c5b94 2860
6332c8d3 2861 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2862 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2863 NETIF_F_HW_VLAN_TX;
2864 if (be_multi_rxq(adapter))
2865 netdev->hw_features |= NETIF_F_RXHASH;
2866
2867 netdev->features |= netdev->hw_features |
8b8ddc68 2868 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 2869
eb8a50d9 2870 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 2871 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2872
2873 netdev->flags |= IFF_MULTICAST;
2874
2875 /* Default settings for Rx and Tx flow control */
2876 adapter->rx_fc = true;
2877 adapter->tx_fc = true;
2878
2879 netif_set_gso_max_size(netdev, 65535);
2880
2881 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2882
2883 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2884
2885 for_all_rx_queues(adapter, rxo, i)
2886 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2887 BE_NAPI_WEIGHT);
2888
5fb379ee 2889 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 2890 BE_NAPI_WEIGHT);
2891}
2892
2893static void be_unmap_pci_bars(struct be_adapter *adapter)
2894{
2895 if (adapter->csr)
2896 iounmap(adapter->csr);
2897 if (adapter->db)
2898 iounmap(adapter->db);
ba343c77 2899 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2900 iounmap(adapter->pcicfg);
2901}
2902
2903static int be_map_pci_bars(struct be_adapter *adapter)
2904{
2905 u8 __iomem *addr;
ba343c77 2906 int pcicfg_reg, db_reg;
6b7c5b94 2907
2908 if (lancer_chip(adapter)) {
2909 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2910 pci_resource_len(adapter->pdev, 0));
2911 if (addr == NULL)
2912 return -ENOMEM;
2913 adapter->db = addr;
2914 return 0;
2915 }
2916
2917 if (be_physfn(adapter)) {
2918 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2919 pci_resource_len(adapter->pdev, 2));
2920 if (addr == NULL)
2921 return -ENOMEM;
2922 adapter->csr = addr;
2923 }
6b7c5b94 2924
ba343c77 2925 if (adapter->generation == BE_GEN2) {
7b139c83 2926 pcicfg_reg = 1;
2927 db_reg = 4;
2928 } else {
7b139c83 2929 pcicfg_reg = 0;
2930 if (be_physfn(adapter))
2931 db_reg = 4;
2932 else
2933 db_reg = 0;
2934 }
2935 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2936 pci_resource_len(adapter->pdev, db_reg));
2937 if (addr == NULL)
2938 goto pci_map_err;
2939 adapter->db = addr;
2940
2941 if (be_physfn(adapter)) {
2942 addr = ioremap_nocache(
2943 pci_resource_start(adapter->pdev, pcicfg_reg),
2944 pci_resource_len(adapter->pdev, pcicfg_reg));
2945 if (addr == NULL)
2946 goto pci_map_err;
2947 adapter->pcicfg = addr;
2948 } else
2949 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2950
2951 return 0;
2952pci_map_err:
2953 be_unmap_pci_bars(adapter);
2954 return -ENOMEM;
2955}
2956
2957
2958static void be_ctrl_cleanup(struct be_adapter *adapter)
2959{
8788fdc2 2960 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2961
2962 be_unmap_pci_bars(adapter);
2963
2964 if (mem->va)
2965 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2966 mem->dma);
2967
2968 mem = &adapter->mc_cmd_mem;
2969 if (mem->va)
2970 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2971 mem->dma);
2972}
2973
2974static int be_ctrl_init(struct be_adapter *adapter)
2975{
2976 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2977 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 2978 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 2979 int status;
2980
2981 status = be_map_pci_bars(adapter);
2982 if (status)
e7b909a6 2983 goto done;
2984
2985 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2986 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2987 mbox_mem_alloc->size,
2988 &mbox_mem_alloc->dma,
2989 GFP_KERNEL);
6b7c5b94 2990 if (!mbox_mem_alloc->va) {
2991 status = -ENOMEM;
2992 goto unmap_pci_bars;
6b7c5b94 2993 }
e7b909a6 2994
2995 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2996 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2997 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2998 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2999
3000 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3001 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3002 mc_cmd_mem->size, &mc_cmd_mem->dma,
3003 GFP_KERNEL);
3004 if (mc_cmd_mem->va == NULL) {
3005 status = -ENOMEM;
3006 goto free_mbox;
3007 }
3008 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3009
2984961c 3010 mutex_init(&adapter->mbox_lock);
3011 spin_lock_init(&adapter->mcc_lock);
3012 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3013
dd131e76 3014 init_completion(&adapter->flash_compl);
cf588477 3015 pci_save_state(adapter->pdev);
6b7c5b94 3016 return 0;
3017
3018free_mbox:
3019 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3020 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3021
3022unmap_pci_bars:
3023 be_unmap_pci_bars(adapter);
3024
3025done:
3026 return status;
3027}
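/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the over-allocate-and-align trick used for the
 * mailbox above: allocating size + 16 guarantees a 16-byte-aligned window
 * of 'size' bytes somewhere in the buffer, and PTR_ALIGN(va, 16) rounds
 * the base pointer up to the next 16-byte boundary.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t size = 100;
	void *raw = malloc(size + 16);
	void *aligned;

	if (!raw)
		return 1;
	/* round up to a multiple of 16, as PTR_ALIGN(raw, 16) would */
	aligned = (void *)(((uintptr_t)raw + 15) & ~(uintptr_t)15);

	printf("raw=%p aligned=%p\n", raw, aligned);
	free(raw);
	return 0;
}
#endif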
3028
3029static void be_stats_cleanup(struct be_adapter *adapter)
3030{
3abcdeda 3031 struct be_dma_mem *cmd = &adapter->stats_cmd;
3032
3033 if (cmd->va)
3034 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3035 cmd->va, cmd->dma);
3036}
3037
3038static int be_stats_init(struct be_adapter *adapter)
3039{
3abcdeda 3040 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3041
005d5696 3042 if (adapter->generation == BE_GEN2) {
89a88ab8 3043 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3044 } else {
3045 if (lancer_chip(adapter))
3046 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3047 else
3048 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3049 }
3050 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3051 GFP_KERNEL);
3052 if (cmd->va == NULL)
3053 return -1;
d291b9af 3054 memset(cmd->va, 0, cmd->size);
3055 return 0;
3056}
3057
3058static void __devexit be_remove(struct pci_dev *pdev)
3059{
3060 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3061
3062 if (!adapter)
3063 return;
3064
3065 cancel_delayed_work_sync(&adapter->work);
3066
3067 unregister_netdev(adapter->netdev);
3068
3069 be_clear(adapter);
3070
3071 be_stats_cleanup(adapter);
3072
3073 be_ctrl_cleanup(adapter);
3074
48f5a191 3075 kfree(adapter->vf_cfg);
3076 be_sriov_disable(adapter);
3077
8d56ff11 3078 be_msix_disable(adapter);
3079
3080 pci_set_drvdata(pdev, NULL);
3081 pci_release_regions(pdev);
3082 pci_disable_device(pdev);
3083
3084 free_netdev(adapter->netdev);
3085}
3086
2243e2e9 3087static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3088{
6b7c5b94 3089 int status;
2243e2e9 3090 u8 mac[ETH_ALEN];
6b7c5b94 3091
2243e2e9 3092 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3093 if (status)
3094 return status;
3095
3096 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3097 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3098 if (status)
3099 return status;
3100
2243e2e9 3101 memset(mac, 0, ETH_ALEN);
ba343c77 3102
3103 /* A default permanent address is given to each VF for Lancer*/
3104 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3105 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3106 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 3107
3108 if (status)
3109 return status;
ca9e4988 3110
3111 if (!is_valid_ether_addr(mac))
3112 return -EADDRNOTAVAIL;
3113
3114 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3115 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3116 }
6b7c5b94 3117
3486be29 3118 if (adapter->function_mode & 0x400)
82903e4b
AK
3119 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3120 else
3121 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3122
3123 status = be_cmd_get_cntl_attributes(adapter);
3124 if (status)
3125 return status;
3126
3127 if ((num_vfs && adapter->sriov_enabled) ||
3128 (adapter->function_mode & 0x400) ||
3129 lancer_chip(adapter) || !be_physfn(adapter)) {
3130 adapter->num_tx_qs = 1;
3131 netif_set_real_num_tx_queues(adapter->netdev,
3132 adapter->num_tx_qs);
3133 } else {
3134 adapter->num_tx_qs = MAX_TX_QS;
3135 }
3136
2243e2e9 3137 return 0;
3138}
3139
3140static int be_dev_family_check(struct be_adapter *adapter)
3141{
3142 struct pci_dev *pdev = adapter->pdev;
3143 u32 sli_intf = 0, if_type;
3144
3145 switch (pdev->device) {
3146 case BE_DEVICE_ID1:
3147 case OC_DEVICE_ID1:
3148 adapter->generation = BE_GEN2;
3149 break;
3150 case BE_DEVICE_ID2:
3151 case OC_DEVICE_ID2:
3152 adapter->generation = BE_GEN3;
3153 break;
3154 case OC_DEVICE_ID3:
12f4d0a8 3155 case OC_DEVICE_ID4:
3156 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3157 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3158 SLI_INTF_IF_TYPE_SHIFT;
3159
3160 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3161 if_type != 0x02) {
3162 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3163 return -EINVAL;
3164 }
3165 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3166 SLI_INTF_FAMILY_SHIFT);
3167 adapter->generation = BE_GEN3;
3168 break;
3169 default:
3170 adapter->generation = 0;
3171 }
3172 return 0;
3173}
3174
3175static int lancer_wait_ready(struct be_adapter *adapter)
3176{
3177#define SLIPORT_READY_TIMEOUT 500
3178 u32 sliport_status;
3179 int status = 0, i;
3180
3181 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3182 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3183 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3184 break;
3185
3186 msleep(20);
3187 }
3188
3189 if (i == SLIPORT_READY_TIMEOUT)
3190 status = -1;
3191
3192 return status;
3193}
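/* Editor's note: an illustrative, self-contained sketch (example only, not
 * part of the driver) of the bounded-poll shape of lancer_wait_ready()
 * above: read a status register up to SLIPORT_READY_TIMEOUT times with a
 * sleep in between, and report failure only when every poll expires. The
 * stub below fakes the register becoming ready on the third read.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_TIMEOUT 500

static int example_read_status(void)
{
	static int polls;
	return ++polls >= 3;	/* "ready" bit appears on the third poll */
}

int main(void)
{
	int i;

	for (i = 0; i < EXAMPLE_TIMEOUT; i++) {
		if (example_read_status())
			break;
		/* the driver msleep(20)s here between reads */
	}
	printf("status=%d after %d polls\n",
		i == EXAMPLE_TIMEOUT ? -1 : 0, i + 1);
	return 0;
}
#endif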
3194
3195static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3196{
3197 int status;
3198 u32 sliport_status, err, reset_needed;
3199 status = lancer_wait_ready(adapter);
3200 if (!status) {
3201 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3202 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3203 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3204 if (err && reset_needed) {
3205 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3206 adapter->db + SLIPORT_CONTROL_OFFSET);
3207
3208 /* check adapter has corrected the error */
3209 status = lancer_wait_ready(adapter);
3210 sliport_status = ioread32(adapter->db +
3211 SLIPORT_STATUS_OFFSET);
3212 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3213 SLIPORT_STATUS_RN_MASK);
3214 if (status || sliport_status)
3215 status = -1;
3216 } else if (err || reset_needed) {
3217 status = -1;
3218 }
3219 }
3220 return status;
3221}
3222
3223static int __devinit be_probe(struct pci_dev *pdev,
3224 const struct pci_device_id *pdev_id)
3225{
3226 int status = 0;
3227 struct be_adapter *adapter;
3228 struct net_device *netdev;
3229
3230 status = pci_enable_device(pdev);
3231 if (status)
3232 goto do_none;
3233
3234 status = pci_request_regions(pdev, DRV_NAME);
3235 if (status)
3236 goto disable_dev;
3237 pci_set_master(pdev);
3238
3c8def97 3239 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3240 if (netdev == NULL) {
3241 status = -ENOMEM;
3242 goto rel_reg;
3243 }
3244 adapter = netdev_priv(netdev);
3245 adapter->pdev = pdev;
3246 pci_set_drvdata(pdev, adapter);
3247
3248 status = be_dev_family_check(adapter);
63657b9c 3249 if (status)
3250 goto free_netdev;
3251
6b7c5b94 3252 adapter->netdev = netdev;
2243e2e9 3253 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3254
2b7bcebf 3255 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3256 if (!status) {
3257 netdev->features |= NETIF_F_HIGHDMA;
3258 } else {
2b7bcebf 3259 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3260 if (status) {
3261 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3262 goto free_netdev;
3263 }
3264 }
3265
ba343c77 3266 be_sriov_enable(adapter);
3267 if (adapter->sriov_enabled) {
3268 adapter->vf_cfg = kcalloc(num_vfs,
3269 sizeof(struct be_vf_cfg), GFP_KERNEL);
3270
3271 if (!adapter->vf_cfg)
3272 goto free_netdev;
3273 }
ba343c77 3274
3275 status = be_ctrl_init(adapter);
3276 if (status)
48f5a191 3277 goto free_vf_cfg;
6b7c5b94 3278
3279 if (lancer_chip(adapter)) {
3280 status = lancer_test_and_set_rdy_state(adapter);
3281 if (status) {
 3282 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3283 goto ctrl_clean;
3284 }
3285 }
3286
2243e2e9 3287 /* sync up with fw's ready state */
3288 if (be_physfn(adapter)) {
3289 status = be_cmd_POST(adapter);
3290 if (status)
3291 goto ctrl_clean;
ba343c77 3292 }
6b7c5b94 3293
3294 /* tell fw we're ready to fire cmds */
3295 status = be_cmd_fw_init(adapter);
6b7c5b94 3296 if (status)
3297 goto ctrl_clean;
3298
3299 status = be_cmd_reset_function(adapter);
3300 if (status)
3301 goto ctrl_clean;
556ae191 3302
3303 status = be_stats_init(adapter);
3304 if (status)
3305 goto ctrl_clean;
3306
3307 status = be_get_config(adapter);
3308 if (status)
3309 goto stats_clean;
6b7c5b94 3310
3311 /* The INTR bit may be set in the card when probed by a kdump kernel
3312 * after a crash.
3313 */
3314 if (!lancer_chip(adapter))
3315 be_intr_set(adapter, false);
3316
3317 be_msix_enable(adapter);
3318
6b7c5b94 3319 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3320
3321 status = be_setup(adapter);
3322 if (status)
3abcdeda 3323 goto msix_disable;
2243e2e9 3324
3abcdeda 3325 be_netdev_init(netdev);
3326 status = register_netdev(netdev);
3327 if (status != 0)
5fb379ee 3328 goto unsetup;
63a76944 3329 netif_carrier_off(netdev);
6b7c5b94 3330
e6319365 3331 if (be_physfn(adapter) && adapter->sriov_enabled) {
3332 u8 mac_speed;
3333 bool link_up;
3334 u16 vf, lnk_speed;
3335
3336 if (!lancer_chip(adapter)) {
3337 status = be_vf_eth_addr_config(adapter);
3338 if (status)
3339 goto unreg_netdev;
3340 }
3341
3342 for (vf = 0; vf < num_vfs; vf++) {
3343 status = be_cmd_link_status_query(adapter, &link_up,
3344 &mac_speed, &lnk_speed, vf + 1);
3345 if (!status)
3346 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3347 else
3348 goto unreg_netdev;
3349 }
3350 }
3351
c4ca2374 3352 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3353
f203af70 3354 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3355 return 0;
3356
3357unreg_netdev:
3358 unregister_netdev(netdev);
3359unsetup:
3360 be_clear(adapter);
3361msix_disable:
3362 be_msix_disable(adapter);
3363stats_clean:
3364 be_stats_cleanup(adapter);
3365ctrl_clean:
3366 be_ctrl_cleanup(adapter);
3367free_vf_cfg:
3368 kfree(adapter->vf_cfg);
6b7c5b94 3369free_netdev:
ba343c77 3370 be_sriov_disable(adapter);
fe6d2a38 3371 free_netdev(netdev);
8d56ff11 3372 pci_set_drvdata(pdev, NULL);
3373rel_reg:
3374 pci_release_regions(pdev);
3375disable_dev:
3376 pci_disable_device(pdev);
3377do_none:
c4ca2374 3378 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3379 return status;
3380}
3381
3382static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3383{
3384 struct be_adapter *adapter = pci_get_drvdata(pdev);
3385 struct net_device *netdev = adapter->netdev;
3386
a4ca055f 3387 cancel_delayed_work_sync(&adapter->work);
3388 if (adapter->wol)
3389 be_setup_wol(adapter, true);
3390
3391 netif_device_detach(netdev);
3392 if (netif_running(netdev)) {
3393 rtnl_lock();
3394 be_close(netdev);
3395 rtnl_unlock();
3396 }
9e90c961 3397 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3398 be_clear(adapter);
6b7c5b94 3399
a4ca055f 3400 be_msix_disable(adapter);
3401 pci_save_state(pdev);
3402 pci_disable_device(pdev);
3403 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3404 return 0;
3405}
3406
3407static int be_resume(struct pci_dev *pdev)
3408{
3409 int status = 0;
3410 struct be_adapter *adapter = pci_get_drvdata(pdev);
3411 struct net_device *netdev = adapter->netdev;
3412
3413 netif_device_detach(netdev);
3414
3415 status = pci_enable_device(pdev);
3416 if (status)
3417 return status;
3418
3419 pci_set_power_state(pdev, 0);
3420 pci_restore_state(pdev);
3421
a4ca055f 3422 be_msix_enable(adapter);
3423 /* tell fw we're ready to fire cmds */
3424 status = be_cmd_fw_init(adapter);
3425 if (status)
3426 return status;
3427
9b0365f1 3428 be_setup(adapter);
3429 if (netif_running(netdev)) {
3430 rtnl_lock();
3431 be_open(netdev);
3432 rtnl_unlock();
3433 }
3434 netif_device_attach(netdev);
3435
3436 if (adapter->wol)
3437 be_setup_wol(adapter, false);
3438
3439 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3440 return 0;
3441}
3442
/*
 * A function-level reset (FLR) stops the controller from DMAing any
 * further data to the host.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

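/*
 * PCI error recovery (EEH/AER) callbacks. The PCI core invokes these in
 * order on an I/O channel error: error_detected() to quiesce the driver,
 * slot_reset() after the link has been reset, and resume() once traffic
 * may restart.
 */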
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

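/*
 * slot_reset callback: runs after the PCI core has reset the slot.
 * Config space is restored before the device is touched, and
 * be_cmd_POST() serves as the readiness check that the card and its
 * firmware survived the reset.
 */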
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

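/*
 * resume callback: the final recovery step, essentially an abbreviated
 * be_probe(): redo the firmware handshake, rebuild the queues via
 * be_setup(), and reopen the interface if it was running. On any failure
 * the netdev is left detached rather than half-alive.
 */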
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

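/*
 * Wire the callbacks above into the PCI core. err_handler is what makes
 * the EEH/AER recovery path above reachable; without it the PCI core
 * has no driver-assisted recovery path for this device.
 */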
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

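/*
 * Module entry point: rx_frag_size is validated here, at load time,
 * because it sizes the RX buffer fragments for every adapter and only
 * 2048/4096/8192 are accepted. An out-of-range value is forced back to
 * the 2048 default with a warning rather than failing the load. A
 * hypothetical load with explicit parameters (assuming DRV_NAME
 * resolves to "be2net"):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */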
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);