be2net: Move the Emulex driver
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

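/* Allocate DMA-coherent memory for a ring of 'len' entries and zero it */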
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

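/* Enable/disable host interrupts via the HOSTINTR bit of the PCICFG
 * membar control register; does nothing if the bit already matches
 * the requested state or an EEH error is pending.
 */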
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

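/* Doorbell helpers: tell the adapter how many entries were produced
 * on (or consumed from) a ring. The wmb() orders the ring-memory
 * writes before the doorbell write.
 */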
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

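/* HW stats layouts differ by chip: v0 for BE2, v1 for BE3, and a
 * per-port format on Lancer. The populate_* helpers below copy the
 * firmware counters into the generation-agnostic be_drv_stats.
 */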
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i)
                rx_stats(rxo)->rx_drops_no_frags =
                        erx->rx_drops_no_fragments[rxo->q.id];
}

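/* Fold the per-queue SW stats into rtnl_link_stats64; the u64_stats
 * begin/retry loop yields consistent 64-bit readings on 32-bit hosts.
 */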
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats->rx_pkts;
                        bytes = rx_stats->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats->rx_mcast_pkts;
                stats->rx_dropped += rx_stats->rx_drops_no_skbs +
                                        rx_stats->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats->tx_pkts;
                        bytes = tx_stats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

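/* Fill the header WRB that leads every TX request: LSO and checksum
 * offload flags, VLAN tag insertion and the total frame length.
 */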
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

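/* DMA-map the skb head and frags, filling one WRB per mapped piece.
 * On a mapping error, all mappings made so far are unwound and 0 is
 * returned so that the caller drops the skb.
 */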
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

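/* TX entry point: build the WRBs, stop the queue *before* ringing the
 * doorbell if the next skb might not fit, then notify the HW.
 */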
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that the tx compls of
                 * the current transmit (which wake up the queue) are
                 * serialized with the stop.
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

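/* Program the RX filter. Promiscuous mode overrides everything else;
 * otherwise fall back to multicast-promiscuous when more multicast
 * addresses are configured than the HW supports.
 */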
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

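/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the measured RX packet rate and program it if it has changed.
 */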
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

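/* Extract the fields of an RX completion into be_rx_compl_info; the
 * v1 layout applies when the adapter is in BE3 native mode, v0
 * otherwise (see be_rx_compl_get() below).
 */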
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compls */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards; ignore it if
                 * vtm is not set (0x400 is assumed to be the multi-channel
                 * function-mode bit)
                 */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

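/* Allocate a page of at least 'size' bytes; orders > 0 use compound
 * pages so that get_page/put_page work on the whole allocation.
 */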
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

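/* Unmap and reclaim the TX wrbs used by the skb at the TXQ tail, up to and
 * including last_index, and free the skb. Returns the number of wrbs
 * reclaimed (including the header wrb).
 */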
static u16 be_tx_compl_process(struct be_adapter *adapter,
                struct be_tx_obj *txo, u16 last_index)
{
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        kfree_skb(sent_skb);
        return num_wrbs;
}

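/* Return the next valid entry from the event queue, or NULL if the queue
 * is empty; the caller must zero eqe->evt once it has consumed the event.
 */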
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj,
                        bool rearm)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        if (!num)
                rearm = true;

        be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
        rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
                                struct be_tx_obj *txo)
{
        struct be_queue_info *tx_cq = &txo->cq;
        struct be_queue_info *txq = &txo->q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        atomic_sub(num_wrbs, &txq->used);
                        cmpl = 0;
                        num_wrbs = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                atomic_sub(num_wrbs, &txq->used);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_tx_obj *txo;
        u8 i;

        for_all_tx_queues(adapter, txo, i) {
                q = &txo->q;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
                be_queue_free(adapter, q);

                q = &txo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);
        }

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_tx_obj *txo;
        u8 i;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;

        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                sizeof(struct be_eq_entry)))
                return -1;

        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto err;
        adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

        for_all_tx_queues(adapter, txo, i) {
                cq = &txo->cq;
                if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                        goto err;

                if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                        goto err;

                q = &txo->q;
                if (be_queue_alloc(adapter, q, TX_Q_LEN,
                        sizeof(struct be_eth_wrb)))
                        goto err;

                if (be_cmd_txq_create(adapter, q, cq))
                        goto err;
        }
        return 0;

err:
        be_tx_queues_destroy(adapter);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                be_queue_free(adapter, &rxo->q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                q = &rxo->rx_eq.q;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                be_queue_free(adapter, q);
        }
}

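/* Multiple RX queues are wanted only when the function supports RSS,
 * SR-IOV is disabled and the 0x400 function-mode bit is clear; otherwise
 * fall back to a single queue.
 */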
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
                !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
                        "No support for multiple RX queues\n");
                return 1;
        }
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
                                msix_enabled(adapter) ?
                                        adapter->num_msix_vec - 1 : 1);
        if (adapter->num_rx_qs != MAX_RX_QS)
                dev_warn(&adapter->pdev->dev,
                        "Can create only %d RX queues\n", adapter->num_rx_qs);

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q - will be created in be_open() */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}

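/* Returns true if the EQ has an unprocessed event at its tail */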
static bool event_peek(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        return eqe->evt != 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i, tx = 0, rx = 0;

        if (lancer_chip(adapter)) {
                if (event_peek(&adapter->tx_eq))
                        tx = event_handle(adapter, &adapter->tx_eq, false);
                for_all_rx_queues(adapter, rxo, i) {
                        if (event_peek(&rxo->rx_eq))
                                rx |= event_handle(adapter, &rxo->rx_eq, true);
                }

                if (!(tx || rx))
                        return IRQ_NONE;

        } else {
                isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                        (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
                if (!isr)
                        return IRQ_NONE;

                if ((1 << adapter->tx_eq.eq_idx & isr))
                        event_handle(adapter, &adapter->tx_eq, false);

                for_all_rx_queues(adapter, rxo, i) {
                        if ((1 << rxo->rx_eq.eq_idx & isr))
                                event_handle(adapter, &rxo->rx_eq, true);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_rx_obj *rxo = dev;
        struct be_adapter *adapter = rxo->adapter;

        event_handle(adapter, &rxo->rx_eq, true);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq, false);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
        return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u32 work_done;

        rx_stats(rxo)->rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                /* Is it a flush compl that has no data? */
                if (unlikely(rxcp->num_rcvd == 0))
                        goto loop_continue;

                /* Discard compl with partial DMA Lancer B0 */
                if (unlikely(!rxcp->pkt_size)) {
                        be_rx_compl_discard(adapter, rxo, rxcp);
                        goto loop_continue;
                }

                /* On BE drop pkts that arrive due to imperfect filtering in
                 * promiscuous mode on some SKUs
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
                                !lancer_chip(adapter))) {
                        be_rx_compl_discard(adapter, rxo, rxcp);
                        goto loop_continue;
                }

                if (do_gro(rxcp))
                        be_rx_compl_process_gro(adapter, rxo, rxcp);
                else
                        be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
                be_rx_stats_update(rxo, rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo, GFP_ATOMIC);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
        struct be_tx_obj *txo;
        struct be_eth_tx_compl *txcp;
        int tx_compl, mcc_compl, status = 0;
        u8 i;
        u16 num_wrbs;

        for_all_tx_queues(adapter, txo, i) {
                tx_compl = 0;
                num_wrbs = 0;
                while ((txcp = be_tx_compl_get(&txo->cq))) {
                        num_wrbs += be_tx_compl_process(adapter, txo,
                                AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp));
                        tx_compl++;
                }
                if (tx_compl) {
                        be_cq_notify(adapter, txo->cq.id, true, tx_compl);

                        atomic_sub(num_wrbs, &txo->q.used);

                        /* As Tx wrbs have been freed up, wake up netdev queue
                         * if it was stopped due to lack of tx wrbs. */
                        if (__netif_subqueue_stopped(adapter->netdev, i) &&
                                atomic_read(&txo->q.used) < txo->q.len / 2) {
                                netif_wake_subqueue(adapter->netdev, i);
                        }

                        u64_stats_update_begin(&tx_stats(txo)->sync_compl);
                        tx_stats(txo)->tx_compl += tx_compl;
                        u64_stats_update_end(&tx_stats(txo)->sync_compl);
                }
        }

        mcc_compl = be_process_mcc(adapter, &status);

        if (mcc_compl) {
                struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }

        napi_complete(napi);

        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
        adapter->drv_stats.tx_events++;
        return 1;
}

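/* Read the UE (unrecoverable error) status CSRs, mask off bits disabled by
 * the corresponding mask registers, and log the name of every block that
 * reports an error.
 */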
void be_detect_dump_ue(struct be_adapter *adapter)
{
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;

        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW, &ue_status_lo);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HIGH, &ue_status_hi);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

        if (ue_status_lo || ue_status_hi) {
                adapter->ue_detected = true;
                adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }

        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
        if (ue_status_hi) {
                for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
                        if (ue_status_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }
}

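/* Periodic (1 second) housekeeping: check for unrecoverable errors, reap
 * MCC completions while interrupts are off, refresh stats and replenish
 * any RX queues that ran dry.
 */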
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        if (!adapter->ue_detected && !lancer_chip(adapter))
                be_detect_dump_ue(adapter);

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions */
        if (!netif_running(adapter->netdev)) {
                int mcc_compl, status = 0;

                mcc_compl = be_process_mcc(adapter, &status);

                if (mcc_compl) {
                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
                }

                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        for_all_rx_queues(adapter, rxo, i) {
                be_rx_eqd_update(adapter, rxo);

                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
        if (msix_enabled(adapter)) {
                pci_disable_msix(adapter->pdev);
                adapter->num_msix_vec = 0;
        }
}

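/* Try to enable one MSI-X vector per desired RX queue plus one for TX/MCC.
 * If fewer vectors are available, pci_enable_msix() returns the available
 * count; retry with that count as long as it meets the minimum.
 */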
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
        int i, status, num_vec;

        num_vec = be_num_rxqs_want(adapter) + 1;

        for (i = 0; i < num_vec; i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
        if (status == 0) {
                goto done;
        } else if (status >= BE_MIN_MSIX_VECTORS) {
                num_vec = status;
                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                num_vec) == 0)
                        goto done;
        }
        return;
done:
        adapter->num_msix_vec = num_vec;
        return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
        be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status, pos;
                u16 nvfs;

                pos = pci_find_ext_capability(adapter->pdev,
                                                PCI_EXT_CAP_ID_SRIOV);
                pci_read_config_word(adapter->pdev,
                                        pos + PCI_SRIOV_TOTAL_VF, &nvfs);

                if (num_vfs > nvfs) {
                        dev_info(&adapter->pdev->dev,
                                        "Device supports %d VFs and not %d\n",
                                        nvfs, num_vfs);
                        num_vfs = nvfs;
                }

                status = pci_enable_sriov(adapter->pdev, num_vfs);
                adapter->sriov_enabled = status ? false : true;
        }
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        if (adapter->sriov_enabled) {
                pci_disable_sriov(adapter->pdev);
                adapter->sriov_enabled = false;
        }
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
                                        struct be_eq_obj *eq_obj)
{
        return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj,
                void *handler, char *desc, void *context)
{
        struct net_device *netdev = adapter->netdev;
        int vec;

        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
        vec = be_msix_vec_get(adapter, eq_obj);
        return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
                        void *context)
{
        int vec = be_msix_vec_get(adapter, eq_obj);
        free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int status, i;
        char qname[10];

        status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
                                adapter);
        if (status)
                goto err;

        for_all_rx_queues(adapter, rxo, i) {
                sprintf(qname, "rxq%d", i);
                status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
                                qname, rxo);
                if (status)
                        goto err_msix;
        }

        return 0;

err_msix:
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
        dev_warn(&adapter->pdev->dev,
                "MSIX Request IRQ failed - err %d\n", status);
        be_msix_disable(adapter);
        return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (msix_enabled(adapter)) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
                if (!be_physfn(adapter))
                        return status;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_rx_obj *rxo;
        int i;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!msix_enabled(adapter)) {
                free_irq(netdev->irq, adapter);
                goto done;
        }

        /* MSIx */
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for_all_rx_queues(adapter, rxo, i)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
        adapter->isr_registered = false;
}

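/* Destroy the RXQs, reclaim their posted buffers and drain any residual
 * events on the RX event queues.
 */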
static void be_rx_queues_clear(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_rxq_destroy(adapter, q);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created)
                        be_eq_clean(adapter, &rxo->rx_eq);
        }
}

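/* ndo_stop: disable interrupts and NAPI, free the IRQs, and drain all
 * pending TX completions and posted RX buffers before returning.
 */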
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec, i;

        be_async_mcc_disable(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        for_all_rx_queues(adapter, rxo, i)
                napi_disable(&rxo->rx_eq.napi);

        napi_disable(&tx_eq->napi);

        if (lancer_chip(adapter)) {
                be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
                for_all_rx_queues(adapter, rxo, i)
                        be_cq_notify(adapter, rxo->cq.id, false, 0);
                for_all_tx_queues(adapter, txo, i)
                        be_cq_notify(adapter, txo->cq.id, false, 0);
        }

        if (msix_enabled(adapter)) {
                vec = be_msix_vec_get(adapter, tx_eq);
                synchronize_irq(vec);

                for_all_rx_queues(adapter, rxo, i) {
                        vec = be_msix_vec_get(adapter, &rxo->rx_eq);
                        synchronize_irq(vec);
                }
        } else {
                synchronize_irq(netdev->irq);
        }
        be_irq_unregister(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        for_all_tx_queues(adapter, txo, i)
                be_tx_compl_clean(adapter, txo);

        be_rx_queues_clear(adapter);
        return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int rc, i;
        u8 rsstable[MAX_RSS_QS];

        for_all_rx_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                        rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
                        adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        if (be_multi_rxq(adapter)) {
                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        return rc;
        }

        /* First time posting */
        for_all_rx_queues(adapter, rxo, i) {
                be_post_rx_frags(rxo, GFP_KERNEL);
                napi_enable(&rxo->rx_eq.napi);
        }
        return 0;
}

static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_rx_obj *rxo;
        int status, i;

        status = be_rx_queues_setup(adapter);
        if (status)
                goto err;

        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, true);

        /* The evt queues are created in unarmed state; arm them */
        for_all_rx_queues(adapter, rxo, i) {
                be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
                be_cq_notify(adapter, rxo->cq.id, true, 0);
        }
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);

        if (be_physfn(adapter)) {
                status = be_vid_config(adapter, false, 0);
                if (status)
                        goto err;

                status = be_cmd_set_flow_control(adapter,
                                adapter->tx_fc, adapter->rx_fc);
                if (status)
                        goto err;
        }

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
        struct be_dma_mem cmd;
        int status = 0;
        u8 mac[ETH_ALEN];

        memset(mac, 0, ETH_ALEN);

        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                    GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;
        memset(cmd.va, 0, cmd.size);

        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-LAN\n");
                        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                          cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
                                adapter->netdev->dev_addr, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }

        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf = 0;
        int status = 0;
        u8 mac[ETH_ALEN];

        be_vf_eth_addr_generate(adapter, mac);

        for (vf = 0; vf < num_vfs; vf++) {
                status = be_cmd_pmac_add(adapter, mac,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        &adapter->vf_cfg[vf].vf_pmac_id,
                                        vf + 1);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "MAC address add failed for VF %d\n", vf);
                else
                        memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

                mac[5] += 1;
        }
        return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
        u32 vf;

        for (vf = 0; vf < num_vfs; vf++) {
                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                        be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }
}

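/* Create the PF (and, with SR-IOV, the VF) interfaces and all TX/RX/MCC
 * queues; on any failure the partially created resources are torn down in
 * reverse order via the goto ladder.
 */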
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags, vf = 0;
        int status;
        u8 mac[ETH_ALEN];

        be_cmd_req_native_mode(adapter);

        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                BE_IF_FLAGS_BROADCAST |
                                BE_IF_FLAGS_MULTICAST;

        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
                                BE_IF_FLAGS_PROMISCUOUS |
                                BE_IF_FLAGS_PASS_L3L4_ERRORS;
                en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

                if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
                        cap_flags |= BE_IF_FLAGS_RSS;
                        en_flags |= BE_IF_FLAGS_RSS;
                }
        }

        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false/* pmac_invalid */,
                        &adapter->if_handle, &adapter->pmac_id, 0);
        if (status != 0)
                goto do_none;

        if (be_physfn(adapter)) {
                if (adapter->sriov_enabled) {
                        while (vf < num_vfs) {
                                cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
                                                        BE_IF_FLAGS_BROADCAST;
                                status = be_cmd_if_create(adapter, cap_flags,
                                        en_flags, mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf + 1);
                                if (status) {
                                        dev_err(&adapter->pdev->dev,
                                        "Interface Create failed for VF %d\n",
                                        vf);
                                        goto if_destroy;
                                }
                                adapter->vf_cfg[vf].vf_pmac_id =
                                                        BE_INVALID_PMAC_ID;
                                vf++;
                        }
                }
        } else {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
                }
        }

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        /* Allow all priorities by default. A GRP5 evt may modify this */
        adapter->vlan_prio_bmap = 0xff;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        adapter->link_speed = -1;

        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);
        be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
        return status;
}

static int be_clear(struct be_adapter *adapter)
{
        int vf;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                be_vf_eth_addr_rem(adapter);

        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
        adapter->eq_next_idx = 0;

        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
                        if (adapter->vf_cfg[vf].vf_if_handle)
                                be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        vf + 1);

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        adapter->be3_native = 0;

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
}

#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /* update redboot only if crc does not match */
        return memcmp(flashed_crc, p, 4) != 0;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
        int status = 0;
        struct be_phy_info phy_info;

        status = be_cmd_get_phy_info(adapter, &phy_info);
        if (status)
                return false;
        if ((phy_info.phy_type == TN_8022) &&
                (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
                return true;
        }
        return false;
}

static int be_flash_data(struct be_adapter *adapter,
                        const struct firmware *fw,
                        struct be_dma_mem *flash_cmd, int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        u32 total_bytes = 0, flash_op;
        int num_bytes;
        const u8 *p = fw->data;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
        const struct flash_comp *pflashcomp;
        int num_comp;

        static const struct flash_comp gen3_flash_types[10] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
                        FLASH_NCSI_IMAGE_MAX_SIZE_g3},
                { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
                        FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
        };
        static const struct flash_comp gen2_flash_types[8] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2}
        };

        if (adapter->generation == BE_GEN3) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
                num_comp = ARRAY_SIZE(gen3_flash_types);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
        }
        for (i = 0; i < num_comp; i++) {
                if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
                                memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;
                if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
                        if (!phy_flashing_required(adapter))
                                continue;
                }
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
                        pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
                        (num_of_images * sizeof(struct image_hdr)))))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
                        + (num_of_images * sizeof(struct image_hdr));
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
                total_bytes = pflashcomp[i].size;
                while (total_bytes) {
                        if (total_bytes > 32*1024)
                                num_bytes = 32*1024;
                        else
                                num_bytes = total_bytes;
                        total_bytes -= num_bytes;
                        if (!total_bytes) {
                                if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
                                        flash_op = FLASHROM_OPER_PHY_FLASH;
                                else
                                        flash_op = FLASHROM_OPER_FLASH;
                        } else {
                                if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
                                        flash_op = FLASHROM_OPER_PHY_SAVE;
                                else
                                        flash_op = FLASHROM_OPER_SAVE;
                        }
                        memcpy(req->params.data_buf, p, num_bytes);
                        p += num_bytes;
                        status = be_cmd_write_flashrom(adapter, flash_cmd,
                                pflashcomp[i].optype, flash_op, num_bytes);
                        if (status) {
                                if ((status == ILLEGAL_IOCTL_REQ) &&
                                        (pflashcomp[i].optype ==
                                                IMG_TYPE_PHY_FW))
                                        break;
                                dev_err(&adapter->pdev->dev,
                                        "cmd to write to flash rom failed.\n");
                                return -1;
                        }
                }
        }
        return 0;
}

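/* Infer the UFI image generation from the build string in the file header */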
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
        if (fhdr == NULL)
                return 0;
        if (fhdr->build[0] == '3')
                return BE_GEN3;
        else if (fhdr->build[0] == '2')
                return BE_GEN2;
        else
                return 0;
}

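/* On Lancer the firmware image is written to the flash location "/prg" in
 * 32KB chunks via write-object commands; a final zero-length write commits
 * the downloaded image.
 */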
static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;

        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW Image not properly aligned. "
                        "Length must be 4 byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                                &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);

                if (status)
                        break;

                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                        0, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                        &data_written, &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                                flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        "Status code: 0x%x Additional Status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}

2811 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2812 {
2813         struct flash_file_hdr_g2 *fhdr;
2814         struct flash_file_hdr_g3 *fhdr3;
2815         struct image_hdr *img_hdr_ptr = NULL;
2816         struct be_dma_mem flash_cmd;
2817         const u8 *p;
2818         int status = 0, i = 0, num_imgs = 0;
2819
2820         p = fw->data;
2821         fhdr = (struct flash_file_hdr_g2 *) p;
2822
2823         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2824         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2825                                           &flash_cmd.dma, GFP_KERNEL);
2826         if (!flash_cmd.va) {
2827                 status = -ENOMEM;
2828                 dev_err(&adapter->pdev->dev,
2829                         "Memory allocation failure while flashing\n");
2830                 goto be_fw_exit;
2831         }
2832
2833         if ((adapter->generation == BE_GEN3) &&
2834                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2835                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2836                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2837                 for (i = 0; i < num_imgs; i++) {
2838                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2839                                         (sizeof(struct flash_file_hdr_g3) +
2840                                          i * sizeof(struct image_hdr)));
2841                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2842                                 status = be_flash_data(adapter, fw, &flash_cmd,
2843                                                         num_imgs);
2844                 }
2845         } else if ((adapter->generation == BE_GEN2) &&
2846                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2847                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2848         } else {
2849                 dev_err(&adapter->pdev->dev,
2850                         "UFI and Interface are not compatible for flashing\n");
2851                 status = -EINVAL;
2852         }
2853
2854         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2855                           flash_cmd.dma);
2856         if (status) {
2857                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2858                 goto be_fw_exit;
2859         }
2860
2861         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2862
2863 be_fw_exit:
2864         return status;
2865 }
2866
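/* Firmware flashing entry point (reached via the ethtool flash
 * handler): fetch the image with request_firmware() and dispatch to
 * the Lancer or BE2/BE3 download path. Flashing is refused while the
 * interface is down.
 */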
2867 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2868 {
2869         const struct firmware *fw;
2870         int status;
2871
2872         if (!netif_running(adapter->netdev)) {
2873                 dev_err(&adapter->pdev->dev,
2874                         "Firmware load not allowed (interface is down)\n");
2875                 return -ENETDOWN;
2876         }
2877
2878         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2879         if (status)
2880                 goto fw_exit;
2881
2882         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2883
2884         if (lancer_chip(adapter))
2885                 status = lancer_fw_download(adapter, fw);
2886         else
2887                 status = be_fw_download(adapter, fw);
2888
2889 fw_exit:
2890         release_firmware(fw);
2891         return status;
2892 }
2893
2894 static const struct net_device_ops be_netdev_ops = {
2895         .ndo_open               = be_open,
2896         .ndo_stop               = be_close,
2897         .ndo_start_xmit         = be_xmit,
2898         .ndo_set_rx_mode        = be_set_multicast_list,
2899         .ndo_set_mac_address    = be_mac_addr_set,
2900         .ndo_change_mtu         = be_change_mtu,
2901         .ndo_get_stats64        = be_get_stats64,
2902         .ndo_validate_addr      = eth_validate_addr,
2903         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2904         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2905         .ndo_set_vf_mac         = be_set_vf_mac,
2906         .ndo_set_vf_vlan        = be_set_vf_vlan,
2907         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2908         .ndo_get_vf_config      = be_get_vf_config
2909 };
2910
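/* Advertise offload capabilities (SG, TSO, checksum, VLAN accel and,
 * with multiple RX queues, RX hashing), install netdev/ethtool ops and
 * register one NAPI context per RX queue plus one for TX/MCC events.
 */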
2911 static void be_netdev_init(struct net_device *netdev)
2912 {
2913         struct be_adapter *adapter = netdev_priv(netdev);
2914         struct be_rx_obj *rxo;
2915         int i;
2916
2917         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2918                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2919                 NETIF_F_HW_VLAN_TX;
2920         if (be_multi_rxq(adapter))
2921                 netdev->hw_features |= NETIF_F_RXHASH;
2922
2923         netdev->features |= netdev->hw_features |
2924                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2925
2926         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2927                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2928
2929         netdev->flags |= IFF_MULTICAST;
2930
2931         /* Default settings for Rx and Tx flow control */
2932         adapter->rx_fc = true;
2933         adapter->tx_fc = true;
2934
2935         netif_set_gso_max_size(netdev, 65535);
2936
2937         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2938
2939         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2940
2941         for_all_rx_queues(adapter, rxo, i)
2942                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2943                                 BE_NAPI_WEIGHT);
2944
2945         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2946                 BE_NAPI_WEIGHT);
2947 }
2948
2949 static void be_unmap_pci_bars(struct be_adapter *adapter)
2950 {
2951         if (adapter->csr)
2952                 iounmap(adapter->csr);
2953         if (adapter->db)
2954                 iounmap(adapter->db);
2955         if (adapter->pcicfg && be_physfn(adapter))
2956                 iounmap(adapter->pcicfg);
2957 }
2958
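/* Map the PCI BARs needed for this chip family. Lancer only needs
 * BAR 0 (doorbells). On BE2/BE3 the PF maps CSR (BAR 2), doorbell and
 * pcicfg regions; a VF reuses the doorbell BAR for pcicfg access at
 * SRIOV_VF_PCICFG_OFFSET.
 */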
2959 static int be_map_pci_bars(struct be_adapter *adapter)
2960 {
2961         u8 __iomem *addr;
2962         int pcicfg_reg, db_reg;
2963
2964         if (lancer_chip(adapter)) {
2965                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2966                         pci_resource_len(adapter->pdev, 0));
2967                 if (addr == NULL)
2968                         return -ENOMEM;
2969                 adapter->db = addr;
2970                 return 0;
2971         }
2972
2973         if (be_physfn(adapter)) {
2974                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2975                                 pci_resource_len(adapter->pdev, 2));
2976                 if (addr == NULL)
2977                         return -ENOMEM;
2978                 adapter->csr = addr;
2979         }
2980
2981         if (adapter->generation == BE_GEN2) {
2982                 pcicfg_reg = 1;
2983                 db_reg = 4;
2984         } else {
2985                 pcicfg_reg = 0;
2986                 if (be_physfn(adapter))
2987                         db_reg = 4;
2988                 else
2989                         db_reg = 0;
2990         }
2991         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2992                                 pci_resource_len(adapter->pdev, db_reg));
2993         if (addr == NULL)
2994                 goto pci_map_err;
2995         adapter->db = addr;
2996
2997         if (be_physfn(adapter)) {
2998                 addr = ioremap_nocache(
2999                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3000                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3001                 if (addr == NULL)
3002                         goto pci_map_err;
3003                 adapter->pcicfg = addr;
3004         } else {
3005                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
        }
3006
3007         return 0;
3008 pci_map_err:
3009         be_unmap_pci_bars(adapter);
3010         return -ENOMEM;
3011 }
3012
3013
3014 static void be_ctrl_cleanup(struct be_adapter *adapter)
3015 {
3016         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3017
3018         be_unmap_pci_bars(adapter);
3019
3020         if (mem->va)
3021                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3022                                   mem->dma);
3023
3024         mem = &adapter->rx_filter;
3025         if (mem->va)
3026                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3027                                   mem->dma);
3028 }
3029
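/* Set up resources for driving the controller: map BARs, allocate the
 * mailbox (over-allocated by 16 bytes so the command region can be
 * 16-byte aligned with PTR_ALIGN) and the RX filter DMA buffer, and
 * initialize the mailbox/MCC locks.
 */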
3030 static int be_ctrl_init(struct be_adapter *adapter)
3031 {
3032         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3033         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3034         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3035         int status;
3036
3037         status = be_map_pci_bars(adapter);
3038         if (status)
3039                 goto done;
3040
3041         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3042         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3043                                                 mbox_mem_alloc->size,
3044                                                 &mbox_mem_alloc->dma,
3045                                                 GFP_KERNEL);
3046         if (!mbox_mem_alloc->va) {
3047                 status = -ENOMEM;
3048                 goto unmap_pci_bars;
3049         }
3050         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3051         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3052         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3053         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3054
3055         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3056         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3057                                         &rx_filter->dma, GFP_KERNEL);
3058         if (rx_filter->va == NULL) {
3059                 status = -ENOMEM;
3060                 goto free_mbox;
3061         }
3062         memset(rx_filter->va, 0, rx_filter->size);
3063
3064         mutex_init(&adapter->mbox_lock);
3065         spin_lock_init(&adapter->mcc_lock);
3066         spin_lock_init(&adapter->mcc_cq_lock);
3067
3068         init_completion(&adapter->flash_compl);
3069         pci_save_state(adapter->pdev);
3070         return 0;
3071
3072 free_mbox:
3073         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3074                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3075
3076 unmap_pci_bars:
3077         be_unmap_pci_bars(adapter);
3078
3079 done:
3080         return status;
3081 }
3082
3083 static void be_stats_cleanup(struct be_adapter *adapter)
3084 {
3085         struct be_dma_mem *cmd = &adapter->stats_cmd;
3086
3087         if (cmd->va)
3088                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3089                                   cmd->va, cmd->dma);
3090 }
3091
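/* Allocate the stats command DMA buffer; the request size depends on
 * the chip: v0 of the command on BE2, the pport stats request on
 * Lancer, v1 everywhere else.
 */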
3092 static int be_stats_init(struct be_adapter *adapter)
3093 {
3094         struct be_dma_mem *cmd = &adapter->stats_cmd;
3095
3096         if (adapter->generation == BE_GEN2) {
3097                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3098         } else {
3099                 if (lancer_chip(adapter))
3100                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3101                 else
3102                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3103         }
3104         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3105                                      GFP_KERNEL);
3106         if (cmd->va == NULL)
3107                 return -ENOMEM;
3108         memset(cmd->va, 0, cmd->size);
3109         return 0;
3110 }
3111
3112 static void __devexit be_remove(struct pci_dev *pdev)
3113 {
3114         struct be_adapter *adapter = pci_get_drvdata(pdev);
3115
3116         if (!adapter)
3117                 return;
3118
3119         cancel_delayed_work_sync(&adapter->work);
3120
3121         unregister_netdev(adapter->netdev);
3122
3123         be_clear(adapter);
3124
3125         be_stats_cleanup(adapter);
3126
3127         be_ctrl_cleanup(adapter);
3128
3129         kfree(adapter->vf_cfg);
3130         be_sriov_disable(adapter);
3131
3132         be_msix_disable(adapter);
3133
3134         pci_set_drvdata(pdev, NULL);
3135         pci_release_regions(pdev);
3136         pci_disable_device(pdev);
3137
3138         free_netdev(adapter->netdev);
3139 }
3140
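/* Query the static configuration from firmware: FW version, port and
 * function mode/caps, the permanent MAC address, the share of the
 * VLAN table and the number of usable TX queues.
 */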
3141 static int be_get_config(struct be_adapter *adapter)
3142 {
3143         int status;
3144         u8 mac[ETH_ALEN];
3145
3146         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3147         if (status)
3148                 return status;
3149
3150         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3151                         &adapter->function_mode, &adapter->function_caps);
3152         if (status)
3153                 return status;
3154
3155         memset(mac, 0, ETH_ALEN);
3156
3157         /* A default permanent address is given to each VF for Lancer */
3158         if (be_physfn(adapter) || lancer_chip(adapter)) {
3159                 status = be_cmd_mac_addr_query(adapter, mac,
3160                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3161
3162                 if (status)
3163                         return status;
3164
3165                 if (!is_valid_ether_addr(mac))
3166                         return -EADDRNOTAVAIL;
3167
3168                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3169                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3170         }
3171
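        /* In multi-channel (FLEX10) mode, indicated by bit 0x400 of
         * function_mode, only a quarter of the VLAN table belongs to
         * this function.
         */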
3172         if (adapter->function_mode & 0x400)
3173                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3174         else
3175                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3176
3177         status = be_cmd_get_cntl_attributes(adapter);
3178         if (status)
3179                 return status;
3180
3181         if ((num_vfs && adapter->sriov_enabled) ||
3182                 (adapter->function_mode & 0x400) ||
3183                 lancer_chip(adapter) || !be_physfn(adapter)) {
3184                 adapter->num_tx_qs = 1;
3185                 netif_set_real_num_tx_queues(adapter->netdev,
3186                         adapter->num_tx_qs);
3187         } else {
3188                 adapter->num_tx_qs = MAX_TX_QS;
3189         }
3190
3191         return 0;
3192 }
3193
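/* Derive the ASIC generation from the PCI device ID. For
 * OC_DEVICE_ID3/4 also validate the SLI_INTF register contents and
 * record the SLI family.
 */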
3194 static int be_dev_family_check(struct be_adapter *adapter)
3195 {
3196         struct pci_dev *pdev = adapter->pdev;
3197         u32 sli_intf = 0, if_type;
3198
3199         switch (pdev->device) {
3200         case BE_DEVICE_ID1:
3201         case OC_DEVICE_ID1:
3202                 adapter->generation = BE_GEN2;
3203                 break;
3204         case BE_DEVICE_ID2:
3205         case OC_DEVICE_ID2:
3206                 adapter->generation = BE_GEN3;
3207                 break;
3208         case OC_DEVICE_ID3:
3209         case OC_DEVICE_ID4:
3210                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3211                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3212                                                 SLI_INTF_IF_TYPE_SHIFT;
3213
3214                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3215                         if_type != 0x02) {
3216                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3217                         return -EINVAL;
3218                 }
3219                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3220                                          SLI_INTF_FAMILY_SHIFT);
3221                 adapter->generation = BE_GEN3;
3222                 break;
3223         default:
3224                 adapter->generation = 0;
3225         }
3226         return 0;
3227 }
3228
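/* Poll the SLIPORT status register until the port reports ready;
 * gives up after 500 polls at 20ms intervals (~10 seconds).
 */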
3229 static int lancer_wait_ready(struct be_adapter *adapter)
3230 {
3231 #define SLIPORT_READY_TIMEOUT 500
3232         u32 sliport_status;
3233         int status = 0, i;
3234
3235         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3236                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3237                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3238                         break;
3239
3240                 msleep(20);
3241         }
3242
3243         if (i == SLIPORT_READY_TIMEOUT)
3244                 status = -ETIMEDOUT;
3245
3246         return status;
3247 }
3248
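/* Wait for the Lancer port to become ready. If the port is in an
 * error state with the reset-needed bit set, kick off a port reset
 * via SLIPORT_CONTROL and re-check; any error state that remains is
 * treated as fatal.
 */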
3249 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3250 {
3251         int status;
3252         u32 sliport_status, err, reset_needed;
3253         status = lancer_wait_ready(adapter);
3254         if (!status) {
3255                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3256                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3257                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3258                 if (err && reset_needed) {
3259                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3260                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3261
3262                         /* check whether the adapter has corrected the error */
3263                         status = lancer_wait_ready(adapter);
3264                         sliport_status = ioread32(adapter->db +
3265                                                         SLIPORT_STATUS_OFFSET);
3266                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3267                                                 SLIPORT_STATUS_RN_MASK);
3268                         if (status || sliport_status)
3269                                 status = -EIO;
3270                 } else if (err || reset_needed) {
3271                         status = -EIO;
3272                 }
3273         }
3274         return status;
3275 }
3276
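/* PCI probe: enable the device, determine the chip generation, set up
 * DMA masks, SR-IOV, mailbox and stats resources, sync with firmware
 * (POST + fw_init), create queues through be_setup() and register the
 * net_device.
 */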
3277 static int __devinit be_probe(struct pci_dev *pdev,
3278                         const struct pci_device_id *pdev_id)
3279 {
3280         int status = 0;
3281         struct be_adapter *adapter;
3282         struct net_device *netdev;
3283
3284         status = pci_enable_device(pdev);
3285         if (status)
3286                 goto do_none;
3287
3288         status = pci_request_regions(pdev, DRV_NAME);
3289         if (status)
3290                 goto disable_dev;
3291         pci_set_master(pdev);
3292
3293         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3294         if (netdev == NULL) {
3295                 status = -ENOMEM;
3296                 goto rel_reg;
3297         }
3298         adapter = netdev_priv(netdev);
3299         adapter->pdev = pdev;
3300         pci_set_drvdata(pdev, adapter);
3301
3302         status = be_dev_family_check(adapter);
3303         if (status)
3304                 goto free_netdev;
3305
3306         adapter->netdev = netdev;
3307         SET_NETDEV_DEV(netdev, &pdev->dev);
3308
3309         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3310         if (!status) {
3311                 netdev->features |= NETIF_F_HIGHDMA;
3312         } else {
3313                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3314                 if (status) {
3315                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3316                         goto free_netdev;
3317                 }
3318         }
3319
3320         be_sriov_enable(adapter);
3321         if (adapter->sriov_enabled) {
3322                 adapter->vf_cfg = kcalloc(num_vfs,
3323                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3324
3325                 if (!adapter->vf_cfg)
3326                         goto free_netdev;
3327         }
3328
3329         status = be_ctrl_init(adapter);
3330         if (status)
3331                 goto free_vf_cfg;
3332
3333         if (lancer_chip(adapter)) {
3334                 status = lancer_test_and_set_rdy_state(adapter);
3335                 if (status) {
3336                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3337                         goto ctrl_clean;
3338                 }
3339         }
3340
3341         /* sync up with fw's ready state */
3342         if (be_physfn(adapter)) {
3343                 status = be_cmd_POST(adapter);
3344                 if (status)
3345                         goto ctrl_clean;
3346         }
3347
3348         /* tell fw we're ready to fire cmds */
3349         status = be_cmd_fw_init(adapter);
3350         if (status)
3351                 goto ctrl_clean;
3352
3353         status = be_cmd_reset_function(adapter);
3354         if (status)
3355                 goto ctrl_clean;
3356
3357         status = be_stats_init(adapter);
3358         if (status)
3359                 goto ctrl_clean;
3360
3361         status = be_get_config(adapter);
3362         if (status)
3363                 goto stats_clean;
3364
3365         /* The INTR bit may be set in the card when probed by a kdump kernel
3366          * after a crash.
3367          */
3368         if (!lancer_chip(adapter))
3369                 be_intr_set(adapter, false);
3370
3371         be_msix_enable(adapter);
3372
3373         INIT_DELAYED_WORK(&adapter->work, be_worker);
3374
3375         status = be_setup(adapter);
3376         if (status)
3377                 goto msix_disable;
3378
3379         be_netdev_init(netdev);
3380         status = register_netdev(netdev);
3381         if (status != 0)
3382                 goto unsetup;
3383
3384         if (be_physfn(adapter) && adapter->sriov_enabled) {
3385                 u8 mac_speed;
3386                 u16 vf, lnk_speed;
3387
3388                 if (!lancer_chip(adapter)) {
3389                         status = be_vf_eth_addr_config(adapter);
3390                         if (status)
3391                                 goto unreg_netdev;
3392                 }
3393
3394                 for (vf = 0; vf < num_vfs; vf++) {
3395                         status = be_cmd_link_status_query(adapter, &mac_speed,
3396                                                 &lnk_speed, vf + 1);
3397                         if (!status)
3398                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3399                         else
3400                                 goto unreg_netdev;
3401                 }
3402         }
3403
3404         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3405
3406         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3407         return 0;
3408
3409 unreg_netdev:
3410         unregister_netdev(netdev);
3411 unsetup:
3412         be_clear(adapter);
3413 msix_disable:
3414         be_msix_disable(adapter);
3415 stats_clean:
3416         be_stats_cleanup(adapter);
3417 ctrl_clean:
3418         be_ctrl_cleanup(adapter);
3419 free_vf_cfg:
3420         kfree(adapter->vf_cfg);
3421 free_netdev:
3422         be_sriov_disable(adapter);
3423         free_netdev(netdev);
3424         pci_set_drvdata(pdev, NULL);
3425 rel_reg:
3426         pci_release_regions(pdev);
3427 disable_dev:
3428         pci_disable_device(pdev);
3429 do_none:
3430         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3431         return status;
3432 }
3433
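/* Legacy PM suspend: arm wake-on-LAN if configured, close the
 * interface and tear down queues and MSI-X before powering the
 * device down.
 */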
3434 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3435 {
3436         struct be_adapter *adapter = pci_get_drvdata(pdev);
3437         struct net_device *netdev = adapter->netdev;
3438
3439         cancel_delayed_work_sync(&adapter->work);
3440         if (adapter->wol)
3441                 be_setup_wol(adapter, true);
3442
3443         netif_device_detach(netdev);
3444         if (netif_running(netdev)) {
3445                 rtnl_lock();
3446                 be_close(netdev);
3447                 rtnl_unlock();
3448         }
3449         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3450         be_clear(adapter);
3451
3452         be_msix_disable(adapter);
3453         pci_save_state(pdev);
3454         pci_disable_device(pdev);
3455         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3456         return 0;
3457 }
3458
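/* Legacy PM resume: re-enable the device, restore PCI state,
 * re-initialize the firmware command interface and rebuild the queues
 * before reattaching the netdev.
 */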
3459 static int be_resume(struct pci_dev *pdev)
3460 {
3461         int status = 0;
3462         struct be_adapter *adapter = pci_get_drvdata(pdev);
3463         struct net_device *netdev = adapter->netdev;
3464
3465         netif_device_detach(netdev);
3466
3467         status = pci_enable_device(pdev);
3468         if (status)
3469                 return status;
3470
3471         pci_set_power_state(pdev, PCI_D0);
3472         pci_restore_state(pdev);
3473
3474         be_msix_enable(adapter);
3475         /* tell fw we're ready to fire cmds */
3476         status = be_cmd_fw_init(adapter);
3477         if (status)
3478                 return status;
3479
3480         be_setup(adapter);
3481         if (netif_running(netdev)) {
3482                 rtnl_lock();
3483                 be_open(netdev);
3484                 rtnl_unlock();
3485         }
3486         netif_device_attach(netdev);
3487
3488         if (adapter->wol)
3489                 be_setup_wol(adapter, false);
3490
3491         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3492         return 0;
3493 }
3494
3495 /*
3496  * Reset the function on shutdown so BE stops DMAing any data (same effect as an FLR).
3497  */
3498 static void be_shutdown(struct pci_dev *pdev)
3499 {
3500         struct be_adapter *adapter = pci_get_drvdata(pdev);
3501
3502         if (!adapter)
3503                 return;
3504
3505         cancel_delayed_work_sync(&adapter->work);
3506
3507         netif_device_detach(adapter->netdev);
3508
3509         if (adapter->wol)
3510                 be_setup_wol(adapter, true);
3511
3512         be_cmd_reset_function(adapter);
3513
3514         pci_disable_device(pdev);
3515 }
3516
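/* EEH/AER error handling: on a detected channel error, detach and
 * quiesce the device. A permanent failure results in disconnect;
 * otherwise request a slot reset, after which be_eeh_reset() and
 * be_eeh_resume() bring the device back up.
 */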
3517 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3518                                 pci_channel_state_t state)
3519 {
3520         struct be_adapter *adapter = pci_get_drvdata(pdev);
3521         struct net_device *netdev = adapter->netdev;
3522
3523         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3524
3525         adapter->eeh_err = true;
3526
3527         netif_device_detach(netdev);
3528
3529         if (netif_running(netdev)) {
3530                 rtnl_lock();
3531                 be_close(netdev);
3532                 rtnl_unlock();
3533         }
3534         be_clear(adapter);
3535
3536         if (state == pci_channel_io_perm_failure)
3537                 return PCI_ERS_RESULT_DISCONNECT;
3538
3539         pci_disable_device(pdev);
3540
3541         return PCI_ERS_RESULT_NEED_RESET;
3542 }
3543
3544 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3545 {
3546         struct be_adapter *adapter = pci_get_drvdata(pdev);
3547         int status;
3548
3549         dev_info(&adapter->pdev->dev, "EEH reset\n");
3550         adapter->eeh_err = false;
3551
3552         status = pci_enable_device(pdev);
3553         if (status)
3554                 return PCI_ERS_RESULT_DISCONNECT;
3555
3556         pci_set_master(pdev);
3557         pci_set_power_state(pdev, PCI_D0);
3558         pci_restore_state(pdev);
3559
3560         /* Check if card is ok and fw is ready */
3561         status = be_cmd_POST(adapter);
3562         if (status)
3563                 return PCI_ERS_RESULT_DISCONNECT;
3564
3565         return PCI_ERS_RESULT_RECOVERED;
3566 }
3567
3568 static void be_eeh_resume(struct pci_dev *pdev)
3569 {
3570         int status = 0;
3571         struct be_adapter *adapter = pci_get_drvdata(pdev);
3572         struct net_device *netdev = adapter->netdev;
3573
3574         dev_info(&adapter->pdev->dev, "EEH resume\n");
3575
3576         pci_save_state(pdev);
3577
3578         /* tell fw we're ready to fire cmds */
3579         status = be_cmd_fw_init(adapter);
3580         if (status)
3581                 goto err;
3582
3583         status = be_setup(adapter);
3584         if (status)
3585                 goto err;
3586
3587         if (netif_running(netdev)) {
3588                 status = be_open(netdev);
3589                 if (status)
3590                         goto err;
3591         }
3592         netif_device_attach(netdev);
3593         return;
3594 err:
3595         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3596 }
3597
3598 static struct pci_error_handlers be_eeh_handlers = {
3599         .error_detected = be_eeh_err_detected,
3600         .slot_reset = be_eeh_reset,
3601         .resume = be_eeh_resume,
3602 };
3603
3604 static struct pci_driver be_driver = {
3605         .name = DRV_NAME,
3606         .id_table = be_dev_ids,
3607         .probe = be_probe,
3608         .remove = be_remove,
3609         .suspend = be_suspend,
3610         .resume = be_resume,
3611         .shutdown = be_shutdown,
3612         .err_handler = &be_eeh_handlers
3613 };
3614
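/* Validate module parameters before registering the PCI driver;
 * rx_frag_size must be one of 2048, 4096 or 8192.
 */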
3615 static int __init be_init_module(void)
3616 {
3617         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3618             rx_frag_size != 2048) {
3619                 printk(KERN_WARNING DRV_NAME
3620                         " : Module param rx_frag_size must be 2048/4096/8192."
3621                         " Using 2048\n");
3622                 rx_frag_size = 2048;
3623         }
3624
3625         return pci_register_driver(&be_driver);
3626 }
3627 module_init(be_init_module);
3628
3629 static void __exit be_exit_module(void)
3630 {
3631         pci_unregister_driver(&be_driver);
3632 }
3633 module_exit(be_exit_module);