1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/tcp.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_checksum.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
30 #include "bnx2x_sp.h"
31
32 /**
33  * bnx2x_move_fp - move content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @from:       source FP index
37  * @to:         destination FP index
38  *
39  * Makes sure the contents of bp->fp[to].napi are kept
40  * intact. This is done by first copying the napi struct from
41  * the target to the source, and then memcpy'ing the entire
42  * source onto the target. Txdata pointers and related
43  * content are updated as well.
44  */
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46 {
47         struct bnx2x_fastpath *from_fp = &bp->fp[from];
48         struct bnx2x_fastpath *to_fp = &bp->fp[to];
49         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53         int old_max_eth_txqs, new_max_eth_txqs;
54         int old_txdata_index = 0, new_txdata_index = 0;
55
56         /* Copy the NAPI object as it has been already initialized */
57         from_fp->napi = to_fp->napi;
58
59         /* Move bnx2x_fastpath contents */
60         memcpy(to_fp, from_fp, sizeof(*to_fp));
61         to_fp->index = to;
62
63         /* move sp_objs contents as well, as their indices match fp ones */
64         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66         /* move fp_stats contents as well, as their indices match fp ones */
67         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69         /* Update txdata pointers in fp and move txdata content accordingly:
70          * Each fp consumes 'max_cos' txdata structures, so the index should be
71          * decremented by max_cos x delta.
72          */
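        /* Illustrative example (added note, not in the original source):
         * with max_cos = 3 and delta = from - to = 2, new_max_eth_txqs is
         * old_max_eth_txqs - 3 * 2; when 'from' is the FCoE queue, its
         * txdata entry therefore moves back by six slots in bp->bnx2x_txq.
         */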
73
74         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76                                 (bp)->max_cos;
77         if (from == FCOE_IDX(bp)) {
78                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80         }
81
82         memcpy(&bp->bnx2x_txq[new_txdata_index],
83                &bp->bnx2x_txq[old_txdata_index],
84                sizeof(struct bnx2x_fp_txdata));
85         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86 }
87
88 /**
89  * bnx2x_fill_fw_str - Fill buffer with FW version string.
90  *
91  * @bp:        driver handle
92  * @buf:       character buffer to fill with the fw name
93  * @buf_len:   length of the above buffer
94  *
95  */
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97 {
98         if (IS_PF(bp)) {
99                 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101                 phy_fw_ver[0] = '\0';
102                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103                                              phy_fw_ver, PHY_FW_VER_LEN);
104                 strlcpy(buf, bp->fw_ver, buf_len);
105         snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
106                          "bc %d.%d.%d%s%s",
107                          (bp->common.bc_ver & 0xff0000) >> 16,
108                          (bp->common.bc_ver & 0xff00) >> 8,
109                          (bp->common.bc_ver & 0xff),
110                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111         } else {
112                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113         }
114 }
115
116 /**
117  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118  *
119  * @bp: driver handle
120  * @delta:      number of eth queues which were not allocated
121  */
122 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123 {
124         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126         /* Queue pointer cannot be re-set on an fp-basis, as moving a pointer
127          * backward along the array could cause memory to be overwritten
128          */
129         for (cos = 1; cos < bp->max_cos; cos++) {
130                 for (i = 0; i < old_eth_num - delta; i++) {
131                         struct bnx2x_fastpath *fp = &bp->fp[i];
132                         int new_idx = cos * (old_eth_num - delta) + i;
133
134                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135                                sizeof(struct bnx2x_fp_txdata));
136                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137                 }
138         }
139 }
140
141 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143 /* free skb in the packet ring at pos idx
144  * return idx of last bd freed
145  */
146 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147                              u16 idx, unsigned int *pkts_compl,
148                              unsigned int *bytes_compl)
149 {
150         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151         struct eth_tx_start_bd *tx_start_bd;
152         struct eth_tx_bd *tx_data_bd;
153         struct sk_buff *skb = tx_buf->skb;
154         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155         int nbd;
156
157         /* prefetch skb end pointer to speedup dev_kfree_skb() */
158         prefetch(&skb->end);
159
160         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
161            txdata->txq_index, idx, tx_buf, skb);
162
163         /* unmap first bd */
164         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
167
168
169         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172                 BNX2X_ERR("BAD nbd!\n");
173                 bnx2x_panic();
174         }
175 #endif
176         new_cons = nbd + tx_buf->first_bd;
177
178         /* Get the next bd */
179         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181         /* Skip a parse bd... */
182         --nbd;
183         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185         /* ...and the TSO split header bd since they have no mapping */
186         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187                 --nbd;
188                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189         }
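        /* Descriptive note (added): at this point the BD chain has been walked
         * past the start BD, the parse BD and, if present, the TSO split
         * header BD; the remaining 'nbd' entries are regular data BDs mapped
         * with dma_map_page(), which are unmapped in the loop below.
         */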
190
191         /* now free frags */
192         while (nbd > 0) {
193
194                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197                 if (--nbd)
198                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199         }
200
201         /* release skb */
202         WARN_ON(!skb);
203         if (likely(skb)) {
204                 (*pkts_compl)++;
205                 (*bytes_compl) += skb->len;
206         }
207
208         dev_kfree_skb_any(skb);
209         tx_buf->first_bd = 0;
210         tx_buf->skb = NULL;
211
212         return new_cons;
213 }
214
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
216 {
217         struct netdev_queue *txq;
218         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219         unsigned int pkts_compl = 0, bytes_compl = 0;
220
221 #ifdef BNX2X_STOP_ON_ERROR
222         if (unlikely(bp->panic))
223                 return -1;
224 #endif
225
226         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228         sw_cons = txdata->tx_pkt_cons;
229
230         while (sw_cons != hw_cons) {
231                 u16 pkt_cons;
232
233                 pkt_cons = TX_BD(sw_cons);
234
235                 DP(NETIF_MSG_TX_DONE,
236                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
237                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
238
239                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240                                             &pkts_compl, &bytes_compl);
241
242                 sw_cons++;
243         }
244
245         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
247         txdata->tx_pkt_cons = sw_cons;
248         txdata->tx_bd_cons = bd_cons;
249
250         /* Need to make the tx_bd_cons update visible to start_xmit()
251          * before checking for netif_tx_queue_stopped().  Without the
252          * memory barrier, there is a small possibility that
253          * start_xmit() will miss it and cause the queue to be stopped
254          * forever.
255          * On the other hand we need an rmb() here to ensure the proper
256          * ordering of bit testing in the following
257          * netif_tx_queue_stopped(txq) call.
258          */
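        /* Added note: this smp_mb() is assumed to pair with a corresponding
         * barrier on the transmit side, taken after netif_tx_stop_queue() and
         * before the queue-space re-check in bnx2x_start_xmit(); without that
         * pairing the ordering described above is not guaranteed.
         */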
259         smp_mb();
260
261         if (unlikely(netif_tx_queue_stopped(txq))) {
262                 /* Taking tx_lock() is needed to prevent re-enabling the queue
263                  * while it's empty. This could happen if rx_action() gets
264                  * suspended in bnx2x_tx_int() after the condition before
265                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266                  *
267                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
268                  * sends some packets consuming the whole queue again->
269                  * stops the queue
270                  */
271
272                 __netif_tx_lock(txq, smp_processor_id());
273
274                 if ((netif_tx_queue_stopped(txq)) &&
275                     (bp->state == BNX2X_STATE_OPEN) &&
276                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277                         netif_tx_wake_queue(txq);
278
279                 __netif_tx_unlock(txq);
280         }
281         return 0;
282 }
283
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285                                              u16 idx)
286 {
287         u16 last_max = fp->last_max_sge;
288
289         if (SUB_S16(idx, last_max) > 0)
290                 fp->last_max_sge = idx;
291 }
292
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294                                          u16 sge_len,
295                                          struct eth_end_agg_rx_cqe *cqe)
296 {
297         struct bnx2x *bp = fp->bp;
298         u16 last_max, last_elem, first_elem;
299         u16 delta = 0;
300         u16 i;
301
302         if (!sge_len)
303                 return;
304
305         /* First mark all used pages */
306         for (i = 0; i < sge_len; i++)
307                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
309
310         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
312
313         /* Here we assume that the last SGE index is the biggest */
314         prefetch((void *)(fp->sge_mask));
315         bnx2x_update_last_max_sge(fp,
316                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
317
318         last_max = RX_SGE(fp->last_max_sge);
319         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
321
322         /* If ring is not full */
323         if (last_elem + 1 != first_elem)
324                 last_elem++;
325
326         /* Now update the prod */
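        /* Descriptive note (added): walk the 64-bit mask elements starting at
         * the current producer; every element whose SGEs have all been
         * consumed (mask cleared to zero) is re-armed to all-ones and the
         * producer advances by BIT_VEC64_ELEM_SZ, stopping at the first
         * element that still has unconsumed SGEs.
         */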
327         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328                 if (likely(fp->sge_mask[i]))
329                         break;
330
331                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332                 delta += BIT_VEC64_ELEM_SZ;
333         }
334
335         if (delta > 0) {
336                 fp->rx_sge_prod += delta;
337                 /* clear page-end entries */
338                 bnx2x_clear_sge_mask_next_elems(fp);
339         }
340
341         DP(NETIF_MSG_RX_STATUS,
342            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
343            fp->last_max_sge, fp->rx_sge_prod);
344 }
345
346 /* Get the Toeplitz hash value from the CQE (calculated by HW)
347  * so it can be stored in the skb.
348  */
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350                             const struct eth_fast_path_rx_cqe *cqe,
351                             bool *l4_rxhash)
352 {
353         /* Get Toeplitz hash from CQE */
354         if ((bp->dev->features & NETIF_F_RXHASH) &&
355             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356                 enum eth_rss_hash_type htype;
357
358                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359                 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360                              (htype == TCP_IPV6_HASH_TYPE);
361                 return le32_to_cpu(cqe->rss_hash_result);
362         }
363         *l4_rxhash = false;
364         return 0;
365 }
366
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
368                             u16 cons, u16 prod,
369                             struct eth_fast_path_rx_cqe *cqe)
370 {
371         struct bnx2x *bp = fp->bp;
372         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375         dma_addr_t mapping;
376         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
378
379         /* print error if current state != stop */
380         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
383         /* Try to map an empty data buffer from the aggregation info  */
384         mapping = dma_map_single(&bp->pdev->dev,
385                                  first_buf->data + NET_SKB_PAD,
386                                  fp->rx_buf_size, DMA_FROM_DEVICE);
387         /*
388          *  ...if it fails - move the skb from the consumer to the producer
389          *  and set the current aggregation state as ERROR to drop it
390          *  when TPA_STOP arrives.
391          */
392
393         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394                 /* Move the BD from the consumer to the producer */
395                 bnx2x_reuse_rx_data(fp, cons, prod);
396                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397                 return;
398         }
399
400         /* move empty data from pool to prod */
401         prod_rx_buf->data = first_buf->data;
402         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403         /* point prod_bd to new data */
404         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
407         /* move partial skb from cons to pool (don't unmap yet) */
408         *first_buf = *cons_rx_buf;
409
410         /* mark bin state as START */
411         tpa_info->parsing_flags =
412                 le16_to_cpu(cqe->pars_flags.flags);
413         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414         tpa_info->tpa_state = BNX2X_TPA_START;
415         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416         tpa_info->placement_offset = cqe->placement_offset;
417         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418         if (fp->mode == TPA_MODE_GRO) {
419                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
420                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421                 tpa_info->gro_size = gro_size;
422         }
423
424 #ifdef BNX2X_STOP_ON_ERROR
425         fp->tpa_queue_used |= (1 << queue);
426 #ifdef _ASM_GENERIC_INT_L64_H
427         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428 #else
429         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430 #endif
431            fp->tpa_queue_used);
432 #endif
433 }
434
435 /* Timestamp option length allowed for TPA aggregation:
436  *
437  *              nop nop kind length echo val
438  */
439 #define TPA_TSTAMP_OPT_LEN      12
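/* Added note: 12 = 2 NOP bytes + 1 kind byte + 1 length byte + two 4-byte
 * timestamp values (TSval and TSecr).
 */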
440 /**
441  * bnx2x_set_gro_params - compute GRO values
442  *
443  * @skb:                packet skb
444  * @parsing_flags:      parsing flags from the START CQE
445  * @len_on_bd:          total length of the first packet for the
446  *                      aggregation.
447  * @pkt_len:            length of all segments
448  *
449  * Approximates the MSS for this aggregation using its first
450  * packet.
451  * Computes the number of aggregated segments, and the gso_type.
452  */
453 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454                                  u16 len_on_bd, unsigned int pkt_len,
455                                  u16 num_of_coalesced_segs)
456 {
457         /* TPA aggregation won't have either IP options or TCP options
458          * other than timestamp or IPv6 extension headers.
459          */
460         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463             PRS_FLAG_OVERETH_IPV6) {
464                 hdrs_len += sizeof(struct ipv6hdr);
465                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466         } else {
467                 hdrs_len += sizeof(struct iphdr);
468                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469         }
470
471         /* Check if there was a TCP timestamp; if there is, it will
472          * always be 12 bytes long: nop nop kind length echo val.
473          *
474          * Otherwise FW would close the aggregation.
475          */
476         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477                 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
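        /* Illustrative example (added): for IPv4 without a timestamp option,
         * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) = 54, so
         * gso_size is len_on_bd - 54.
         */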
479         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482          * to skb_shinfo(skb)->gso_segs
483          */
484         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
485 }
486
487 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488                               struct bnx2x_fastpath *fp, u16 index)
489 {
490         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493         dma_addr_t mapping;
494
495         if (unlikely(page == NULL)) {
496                 BNX2X_ERR("Can't alloc sge\n");
497                 return -ENOMEM;
498         }
499
500         mapping = dma_map_page(&bp->pdev->dev, page, 0,
501                                SGE_PAGES, DMA_FROM_DEVICE);
502         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503                 __free_pages(page, PAGES_PER_SGE_SHIFT);
504                 BNX2X_ERR("Can't map sge\n");
505                 return -ENOMEM;
506         }
507
508         sw_buf->page = page;
509         dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514         return 0;
515 }
516
517 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518                                struct bnx2x_agg_info *tpa_info,
519                                u16 pages,
520                                struct sk_buff *skb,
521                                struct eth_end_agg_rx_cqe *cqe,
522                                u16 cqe_idx)
523 {
524         struct sw_rx_page *rx_pg, old_rx_pg;
525         u32 i, frag_len, frag_size;
526         int err, j, frag_id = 0;
527         u16 len_on_bd = tpa_info->len_on_bd;
528         u16 full_page = 0, gro_size = 0;
529
530         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
531
532         if (fp->mode == TPA_MODE_GRO) {
533                 gro_size = tpa_info->gro_size;
534                 full_page = tpa_info->full_page;
535         }
536
537         /* This is needed in order to enable forwarding support */
538         if (frag_size)
539                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540                                      le16_to_cpu(cqe->pkt_len),
541                                      le16_to_cpu(cqe->num_of_coalesced_segs));
542
543 #ifdef BNX2X_STOP_ON_ERROR
544         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546                           pages, cqe_idx);
547                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
548                 bnx2x_panic();
549                 return -EINVAL;
550         }
551 #endif
552
553         /* Run through the SGL and compose the fragmented skb */
554         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
555                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
556
557                 /* FW gives the indices of the SGE as if the ring is an array
558                    (meaning that "next" element will consume 2 indices) */
559                 if (fp->mode == TPA_MODE_GRO)
560                         frag_len = min_t(u32, frag_size, (u32)full_page);
561                 else /* LRO */
562                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
563
564                 rx_pg = &fp->rx_page_ring[sge_idx];
565                 old_rx_pg = *rx_pg;
566
567                 /* If we fail to allocate a substitute page, we simply stop
568                    where we are and drop the whole packet */
569                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570                 if (unlikely(err)) {
571                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
572                         return err;
573                 }
574
575                 /* Unmap the page as we are going to pass it to the stack */
576                 dma_unmap_page(&bp->pdev->dev,
577                                dma_unmap_addr(&old_rx_pg, mapping),
578                                SGE_PAGES, DMA_FROM_DEVICE);
579                 /* Add one frag and update the appropriate fields in the skb */
580                 if (fp->mode == TPA_MODE_LRO)
581                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582                 else { /* GRO */
583                         int rem;
584                         int offset = 0;
585                         for (rem = frag_len; rem > 0; rem -= gro_size) {
586                                 int len = rem > gro_size ? gro_size : rem;
587                                 skb_fill_page_desc(skb, frag_id++,
588                                                    old_rx_pg.page, offset, len);
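                                /* Added note: when one SGE page is split into
                                 * several GRO-sized frags, every frag after
                                 * the first takes an extra page reference so
                                 * the page is not freed while the skb still
                                 * points into it.
                                 */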
589                                 if (offset)
590                                         get_page(old_rx_pg.page);
591                                 offset += len;
592                         }
593                 }
594
595                 skb->data_len += frag_len;
596                 skb->truesize += SGE_PAGES;
597                 skb->len += frag_len;
598
599                 frag_size -= frag_len;
600         }
601
602         return 0;
603 }
604
605 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606 {
607         if (fp->rx_frag_size)
608                 put_page(virt_to_head_page(data));
609         else
610                 kfree(data);
611 }
612
613 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614 {
615         if (fp->rx_frag_size)
616                 return netdev_alloc_frag(fp->rx_frag_size);
617
618         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619 }
620
621 #ifdef CONFIG_INET
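/* Added note: the two helpers below are assumed to prime th->check with the
 * complemented TCP pseudo-header checksum (the same form the stack prepares
 * for checksum offload), which is what the tcp_gro_complete() call in
 * bnx2x_gro_receive() expects for an aggregated packet.
 */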
622 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623 {
624         const struct iphdr *iph = ip_hdr(skb);
625         struct tcphdr *th;
626
627         skb_set_transport_header(skb, sizeof(struct iphdr));
628         th = tcp_hdr(skb);
629
630         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631                                   iph->saddr, iph->daddr, 0);
632 }
633
634 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635 {
636         struct ipv6hdr *iph = ipv6_hdr(skb);
637         struct tcphdr *th;
638
639         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640         th = tcp_hdr(skb);
641
642         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643                                   &iph->saddr, &iph->daddr, 0);
644 }
645 #endif
646
647 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
648                                struct sk_buff *skb)
649 {
650 #ifdef CONFIG_INET
651         if (skb_shinfo(skb)->gso_size) {
652                 skb_set_network_header(skb, 0);
653                 switch (be16_to_cpu(skb->protocol)) {
654                 case ETH_P_IP:
655                         bnx2x_gro_ip_csum(bp, skb);
656                         break;
657                 case ETH_P_IPV6:
658                         bnx2x_gro_ipv6_csum(bp, skb);
659                         break;
660                 default:
661                         BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
662                                   be16_to_cpu(skb->protocol));
663                 }
664                 tcp_gro_complete(skb);
665         }
666 #endif
667         napi_gro_receive(&fp->napi, skb);
668 }
669
670 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
671                            struct bnx2x_agg_info *tpa_info,
672                            u16 pages,
673                            struct eth_end_agg_rx_cqe *cqe,
674                            u16 cqe_idx)
675 {
676         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
677         u8 pad = tpa_info->placement_offset;
678         u16 len = tpa_info->len_on_bd;
679         struct sk_buff *skb = NULL;
680         u8 *new_data, *data = rx_buf->data;
681         u8 old_tpa_state = tpa_info->tpa_state;
682
683         tpa_info->tpa_state = BNX2X_TPA_STOP;
684
685         /* If there was an error during the handling of the TPA_START -
686          * drop this aggregation.
687          */
688         if (old_tpa_state == BNX2X_TPA_ERROR)
689                 goto drop;
690
691         /* Try to allocate the new data */
692         new_data = bnx2x_frag_alloc(fp);
693         /* Unmap skb in the pool anyway, as we are going to change
694            pool entry status to BNX2X_TPA_STOP even if new skb allocation
695            fails. */
696         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
697                          fp->rx_buf_size, DMA_FROM_DEVICE);
698         if (likely(new_data))
699                 skb = build_skb(data, fp->rx_frag_size);
700
701         if (likely(skb)) {
702 #ifdef BNX2X_STOP_ON_ERROR
703                 if (pad + len > fp->rx_buf_size) {
704                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
705                                   pad, len, fp->rx_buf_size);
706                         bnx2x_panic();
707                         return;
708                 }
709 #endif
710
711                 skb_reserve(skb, pad + NET_SKB_PAD);
712                 skb_put(skb, len);
713                 skb->rxhash = tpa_info->rxhash;
714                 skb->l4_rxhash = tpa_info->l4_rxhash;
715
716                 skb->protocol = eth_type_trans(skb, bp->dev);
717                 skb->ip_summed = CHECKSUM_UNNECESSARY;
718
719                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
720                                          skb, cqe, cqe_idx)) {
721                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
722                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
723                         bnx2x_gro_receive(bp, fp, skb);
724                 } else {
725                         DP(NETIF_MSG_RX_STATUS,
726                            "Failed to allocate new pages - dropping packet!\n");
727                         dev_kfree_skb_any(skb);
728                 }
729
730
731                 /* put new data in bin */
732                 rx_buf->data = new_data;
733
734                 return;
735         }
736         bnx2x_frag_free(fp, new_data);
737 drop:
738         /* drop the packet and keep the buffer in the bin */
739         DP(NETIF_MSG_RX_STATUS,
740            "Failed to allocate or map a new skb - dropping packet!\n");
741         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
742 }
743
744 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
745                                struct bnx2x_fastpath *fp, u16 index)
746 {
747         u8 *data;
748         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
749         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
750         dma_addr_t mapping;
751
752         data = bnx2x_frag_alloc(fp);
753         if (unlikely(data == NULL))
754                 return -ENOMEM;
755
756         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
757                                  fp->rx_buf_size,
758                                  DMA_FROM_DEVICE);
759         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
760                 bnx2x_frag_free(fp, data);
761                 BNX2X_ERR("Can't map rx data\n");
762                 return -ENOMEM;
763         }
764
765         rx_buf->data = data;
766         dma_unmap_addr_set(rx_buf, mapping, mapping);
767
768         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
769         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
770
771         return 0;
772 }
773
774 static
775 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
776                                  struct bnx2x_fastpath *fp,
777                                  struct bnx2x_eth_q_stats *qstats)
778 {
779         /* Do nothing if no L4 csum validation was done.
780          * We do not check whether IP csum was validated. For IPv4 we assume
781          * that if the card got as far as validating the L4 csum, it also
782          * validated the IP csum. IPv6 has no IP csum.
783          */
784         if (cqe->fast_path_cqe.status_flags &
785             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
786                 return;
787
788         /* If L4 validation was done, check if an error was found. */
789
790         if (cqe->fast_path_cqe.type_error_flags &
791             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
792              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
793                 qstats->hw_csum_err++;
794         else
795                 skb->ip_summed = CHECKSUM_UNNECESSARY;
796 }
797
798 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
799 {
800         struct bnx2x *bp = fp->bp;
801         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
802         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
803         int rx_pkt = 0;
804
805 #ifdef BNX2X_STOP_ON_ERROR
806         if (unlikely(bp->panic))
807                 return 0;
808 #endif
809
810         /* The CQ "next element" is the same size as a regular element,
811            which is why it is OK to simply step over it here */
812         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
813         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
814                 hw_comp_cons++;
815
816         bd_cons = fp->rx_bd_cons;
817         bd_prod = fp->rx_bd_prod;
818         bd_prod_fw = bd_prod;
819         sw_comp_cons = fp->rx_comp_cons;
820         sw_comp_prod = fp->rx_comp_prod;
821
822         /* Memory barrier necessary as speculative reads of the rx
823          * buffer can be ahead of the index in the status block
824          */
825         rmb();
826
827         DP(NETIF_MSG_RX_STATUS,
828            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
829            fp->index, hw_comp_cons, sw_comp_cons);
830
831         while (sw_comp_cons != hw_comp_cons) {
832                 struct sw_rx_bd *rx_buf = NULL;
833                 struct sk_buff *skb;
834                 union eth_rx_cqe *cqe;
835                 struct eth_fast_path_rx_cqe *cqe_fp;
836                 u8 cqe_fp_flags;
837                 enum eth_rx_cqe_type cqe_fp_type;
838                 u16 len, pad, queue;
839                 u8 *data;
840                 bool l4_rxhash;
841
842 #ifdef BNX2X_STOP_ON_ERROR
843                 if (unlikely(bp->panic))
844                         return 0;
845 #endif
846
847                 comp_ring_cons = RCQ_BD(sw_comp_cons);
848                 bd_prod = RX_BD(bd_prod);
849                 bd_cons = RX_BD(bd_cons);
850
851                 cqe = &fp->rx_comp_ring[comp_ring_cons];
852                 cqe_fp = &cqe->fast_path_cqe;
853                 cqe_fp_flags = cqe_fp->type_error_flags;
854                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
855
856                 DP(NETIF_MSG_RX_STATUS,
857                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
858                    CQE_TYPE(cqe_fp_flags),
859                    cqe_fp_flags, cqe_fp->status_flags,
860                    le32_to_cpu(cqe_fp->rss_hash_result),
861                    le16_to_cpu(cqe_fp->vlan_tag),
862                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
863
864                 /* is this a slowpath msg? */
865                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
866                         bnx2x_sp_event(fp, cqe);
867                         goto next_cqe;
868                 }
869
870                 rx_buf = &fp->rx_buf_ring[bd_cons];
871                 data = rx_buf->data;
872
873                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
874                         struct bnx2x_agg_info *tpa_info;
875                         u16 frag_size, pages;
876 #ifdef BNX2X_STOP_ON_ERROR
877                         /* sanity check */
878                         if (fp->disable_tpa &&
879                             (CQE_TYPE_START(cqe_fp_type) ||
880                              CQE_TYPE_STOP(cqe_fp_type)))
881                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
882                                           CQE_TYPE(cqe_fp_type));
883 #endif
884
885                         if (CQE_TYPE_START(cqe_fp_type)) {
886                                 u16 queue = cqe_fp->queue_index;
887                                 DP(NETIF_MSG_RX_STATUS,
888                                    "calling tpa_start on queue %d\n",
889                                    queue);
890
891                                 bnx2x_tpa_start(fp, queue,
892                                                 bd_cons, bd_prod,
893                                                 cqe_fp);
894
895                                 goto next_rx;
896
897                         }
898                         queue = cqe->end_agg_cqe.queue_index;
899                         tpa_info = &fp->tpa_info[queue];
900                         DP(NETIF_MSG_RX_STATUS,
901                            "calling tpa_stop on queue %d\n",
902                            queue);
903
904                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
905                                     tpa_info->len_on_bd;
906
907                         if (fp->mode == TPA_MODE_GRO)
908                                 pages = (frag_size + tpa_info->full_page - 1) /
909                                          tpa_info->full_page;
910                         else
911                                 pages = SGE_PAGE_ALIGN(frag_size) >>
912                                         SGE_PAGE_SHIFT;
913
914                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
915                                        &cqe->end_agg_cqe, comp_ring_cons);
916 #ifdef BNX2X_STOP_ON_ERROR
917                         if (bp->panic)
918                                 return 0;
919 #endif
920
921                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
922                         goto next_cqe;
923                 }
924                 /* non TPA */
925                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
926                 pad = cqe_fp->placement_offset;
927                 dma_sync_single_for_cpu(&bp->pdev->dev,
928                                         dma_unmap_addr(rx_buf, mapping),
929                                         pad + RX_COPY_THRESH,
930                                         DMA_FROM_DEVICE);
931                 pad += NET_SKB_PAD;
932                 prefetch(data + pad); /* speedup eth_type_trans() */
933                 /* is this an error packet? */
934                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
935                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
936                            "ERROR  flags %x  rx packet %u\n",
937                            cqe_fp_flags, sw_comp_cons);
938                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
939                         goto reuse_rx;
940                 }
941
942                 /* Since we don't have a jumbo ring,
943                  * copy small packets if mtu > 1500
944                  */
945                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
946                     (len <= RX_COPY_THRESH)) {
947                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
948                         if (skb == NULL) {
949                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
950                                    "ERROR  packet dropped because of alloc failure\n");
951                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
952                                 goto reuse_rx;
953                         }
954                         memcpy(skb->data, data + pad, len);
955                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
956                 } else {
957                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
958                                 dma_unmap_single(&bp->pdev->dev,
959                                                  dma_unmap_addr(rx_buf, mapping),
960                                                  fp->rx_buf_size,
961                                                  DMA_FROM_DEVICE);
962                                 skb = build_skb(data, fp->rx_frag_size);
963                                 if (unlikely(!skb)) {
964                                         bnx2x_frag_free(fp, data);
965                                         bnx2x_fp_qstats(bp, fp)->
966                                                         rx_skb_alloc_failed++;
967                                         goto next_rx;
968                                 }
969                                 skb_reserve(skb, pad);
970                         } else {
971                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
972                                    "ERROR  packet dropped because of alloc failure\n");
973                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
974 reuse_rx:
975                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
976                                 goto next_rx;
977                         }
978                 }
979
980                 skb_put(skb, len);
981                 skb->protocol = eth_type_trans(skb, bp->dev);
982
983                 /* Set Toeplitz hash for a non-LRO skb */
984                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
985                 skb->l4_rxhash = l4_rxhash;
986
987                 skb_checksum_none_assert(skb);
988
989                 if (bp->dev->features & NETIF_F_RXCSUM)
990                         bnx2x_csum_validate(skb, cqe, fp,
991                                             bnx2x_fp_qstats(bp, fp));
992
993                 skb_record_rx_queue(skb, fp->rx_queue);
994
995                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
996                     PARSING_FLAGS_VLAN)
997                         __vlan_hwaccel_put_tag(skb,
998                                                le16_to_cpu(cqe_fp->vlan_tag));
999                 napi_gro_receive(&fp->napi, skb);
1000
1001
1002 next_rx:
1003                 rx_buf->data = NULL;
1004
1005                 bd_cons = NEXT_RX_IDX(bd_cons);
1006                 bd_prod = NEXT_RX_IDX(bd_prod);
1007                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1008                 rx_pkt++;
1009 next_cqe:
1010                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1011                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1012
1013                 if (rx_pkt == budget)
1014                         break;
1015         } /* while */
1016
1017         fp->rx_bd_cons = bd_cons;
1018         fp->rx_bd_prod = bd_prod_fw;
1019         fp->rx_comp_cons = sw_comp_cons;
1020         fp->rx_comp_prod = sw_comp_prod;
1021
1022         /* Update producers */
1023         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1024                              fp->rx_sge_prod);
1025
1026         fp->rx_pkt += rx_pkt;
1027         fp->rx_calls++;
1028
1029         return rx_pkt;
1030 }
1031
1032 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1033 {
1034         struct bnx2x_fastpath *fp = fp_cookie;
1035         struct bnx2x *bp = fp->bp;
1036         u8 cos;
1037
1038         DP(NETIF_MSG_INTR,
1039            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1040            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1041         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1042
1043 #ifdef BNX2X_STOP_ON_ERROR
1044         if (unlikely(bp->panic))
1045                 return IRQ_HANDLED;
1046 #endif
1047
1048         /* Handle Rx and Tx according to MSI-X vector */
1049         prefetch(fp->rx_cons_sb);
1050
1051         for_each_cos_in_tx_queue(fp, cos)
1052                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1053
1054         prefetch(&fp->sb_running_index[SM_RX_ID]);
1055         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1056
1057         return IRQ_HANDLED;
1058 }
1059
1060 /* HW Lock for shared dual port PHYs */
1061 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1062 {
1063         mutex_lock(&bp->port.phy_mutex);
1064
1065         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1066 }
1067
1068 void bnx2x_release_phy_lock(struct bnx2x *bp)
1069 {
1070         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1071
1072         mutex_unlock(&bp->port.phy_mutex);
1073 }
1074
1075 /* calculates MF speed according to current linespeed and MF configuration */
1076 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1077 {
1078         u16 line_speed = bp->link_vars.line_speed;
1079         if (IS_MF(bp)) {
1080                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1081                                                    bp->mf_config[BP_VN(bp)]);
1082
1083                 /* Calculate the current MAX line speed limit for the MF
1084                  * devices
1085                  */
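                /* Illustrative example (added): with maxCfg = 50 and a
                 * 10000 Mbps link, SI mode reports 10000 * 50 / 100 = 5000
                 * Mbps, while SD mode caps the speed at
                 * vn_max_rate = 50 * 100 = 5000 Mbps.
                 */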
1086                 if (IS_MF_SI(bp))
1087                         line_speed = (line_speed * maxCfg) / 100;
1088                 else { /* SD mode */
1089                         u16 vn_max_rate = maxCfg * 100;
1090
1091                         if (vn_max_rate < line_speed)
1092                                 line_speed = vn_max_rate;
1093                 }
1094         }
1095
1096         return line_speed;
1097 }
1098
1099 /**
1100  * bnx2x_fill_report_data - fill link report data for reporting
1101  *
1102  * @bp:         driver handle
1103  * @data:       link state to update
1104  *
1105  * It uses non-atomic bit operations because it is called under the mutex.
1106  */
1107 static void bnx2x_fill_report_data(struct bnx2x *bp,
1108                                    struct bnx2x_link_report_data *data)
1109 {
1110         u16 line_speed = bnx2x_get_mf_speed(bp);
1111
1112         memset(data, 0, sizeof(*data));
1113
1114         /* Fill the report data: effective line speed */
1115         data->line_speed = line_speed;
1116
1117         /* Link is down */
1118         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1119                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1120                           &data->link_report_flags);
1121
1122         /* Full DUPLEX */
1123         if (bp->link_vars.duplex == DUPLEX_FULL)
1124                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1125
1126         /* Rx Flow Control is ON */
1127         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1128                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1129
1130         /* Tx Flow Control is ON */
1131         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1132                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1133 }
1134
1135 /**
1136  * bnx2x_link_report - report link status to OS.
1137  *
1138  * @bp:         driver handle
1139  *
1140  * Calls __bnx2x_link_report() under the same locking scheme
1141  * as the link/PHY state managing code to ensure consistent link
1142  * reporting.
1143  */
1144
1145 void bnx2x_link_report(struct bnx2x *bp)
1146 {
1147         bnx2x_acquire_phy_lock(bp);
1148         __bnx2x_link_report(bp);
1149         bnx2x_release_phy_lock(bp);
1150 }
1151
1152 /**
1153  * __bnx2x_link_report - report link status to OS.
1154  *
1155  * @bp:         driver handle
1156  *
1157  * Non-atomic implementation.
1158  * Should be called under the phy_lock.
1159  */
1160 void __bnx2x_link_report(struct bnx2x *bp)
1161 {
1162         struct bnx2x_link_report_data cur_data;
1163
1164         /* reread mf_cfg */
1165         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1166                 bnx2x_read_mf_cfg(bp);
1167
1168         /* Read the current link report info */
1169         bnx2x_fill_report_data(bp, &cur_data);
1170
1171         /* Don't report link down or exactly the same link status twice */
1172         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1173             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1174                       &bp->last_reported_link.link_report_flags) &&
1175              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1176                       &cur_data.link_report_flags)))
1177                 return;
1178
1179         bp->link_cnt++;
1180
1181         /* We are going to report new link parameters now -
1182          * remember the current data for the next time.
1183          */
1184         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1185
1186         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1187                      &cur_data.link_report_flags)) {
1188                 netif_carrier_off(bp->dev);
1189                 netdev_err(bp->dev, "NIC Link is Down\n");
1190                 return;
1191         } else {
1192                 const char *duplex;
1193                 const char *flow;
1194
1195                 netif_carrier_on(bp->dev);
1196
1197                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1198                                        &cur_data.link_report_flags))
1199                         duplex = "full";
1200                 else
1201                         duplex = "half";
1202
1203                 /* Handle the FC at the end so that only these flags can
1204                  * possibly be set. This way we can easily check whether FC
1205                  * is enabled at all.
1206                  */
1207                 if (cur_data.link_report_flags) {
1208                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1209                                      &cur_data.link_report_flags)) {
1210                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1211                                      &cur_data.link_report_flags))
1212                                         flow = "ON - receive & transmit";
1213                                 else
1214                                         flow = "ON - receive";
1215                         } else {
1216                                 flow = "ON - transmit";
1217                         }
1218                 } else {
1219                         flow = "none";
1220                 }
1221                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1222                             cur_data.line_speed, duplex, flow);
1223         }
1224 }
1225
1226 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1227 {
1228         int i;
1229
1230         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1231                 struct eth_rx_sge *sge;
1232
1233                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1234                 sge->addr_hi =
1235                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1236                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1237
1238                 sge->addr_lo =
1239                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1240                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1241         }
1242 }
1243
1244 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1245                                 struct bnx2x_fastpath *fp, int last)
1246 {
1247         int i;
1248
1249         for (i = 0; i < last; i++) {
1250                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1251                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1252                 u8 *data = first_buf->data;
1253
1254                 if (data == NULL) {
1255                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1256                         continue;
1257                 }
1258                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1259                         dma_unmap_single(&bp->pdev->dev,
1260                                          dma_unmap_addr(first_buf, mapping),
1261                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1262                 bnx2x_frag_free(fp, data);
1263                 first_buf->data = NULL;
1264         }
1265 }
1266
1267 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1268 {
1269         int j;
1270
1271         for_each_rx_queue_cnic(bp, j) {
1272                 struct bnx2x_fastpath *fp = &bp->fp[j];
1273
1274                 fp->rx_bd_cons = 0;
1275
1276                 /* Activate BD ring */
1277                 /* Warning!
1278                  * this will generate an interrupt (to the TSTORM);
1279                  * it must only be done after the chip is initialized
1280                  */
1281                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1282                                      fp->rx_sge_prod);
1283         }
1284 }
1285
1286 void bnx2x_init_rx_rings(struct bnx2x *bp)
1287 {
1288         int func = BP_FUNC(bp);
1289         u16 ring_prod;
1290         int i, j;
1291
1292         /* Allocate TPA resources */
1293         for_each_eth_queue(bp, j) {
1294                 struct bnx2x_fastpath *fp = &bp->fp[j];
1295
1296                 DP(NETIF_MSG_IFUP,
1297                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1298
1299                 if (!fp->disable_tpa) {
1300                         /* Fill the per-aggregation pool */
1301                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1302                                 struct bnx2x_agg_info *tpa_info =
1303                                         &fp->tpa_info[i];
1304                                 struct sw_rx_bd *first_buf =
1305                                         &tpa_info->first_buf;
1306
1307                                 first_buf->data = bnx2x_frag_alloc(fp);
1308                                 if (!first_buf->data) {
1309                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1310                                                   j);
1311                                         bnx2x_free_tpa_pool(bp, fp, i);
1312                                         fp->disable_tpa = 1;
1313                                         break;
1314                                 }
1315                                 dma_unmap_addr_set(first_buf, mapping, 0);
1316                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1317                         }
1318
1319                         /* "next page" elements initialization */
1320                         bnx2x_set_next_page_sgl(fp);
1321
1322                         /* set SGEs bit mask */
1323                         bnx2x_init_sge_ring_bit_mask(fp);
1324
1325                         /* Allocate SGEs and initialize the ring elements */
1326                         for (i = 0, ring_prod = 0;
1327                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1328
1329                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1330                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1331                                                   i);
1332                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1333                                                   j);
1334                                         /* Cleanup already allocated elements */
1335                                         bnx2x_free_rx_sge_range(bp, fp,
1336                                                                 ring_prod);
1337                                         bnx2x_free_tpa_pool(bp, fp,
1338                                                             MAX_AGG_QS(bp));
1339                                         fp->disable_tpa = 1;
1340                                         ring_prod = 0;
1341                                         break;
1342                                 }
1343                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1344                         }
1345
1346                         fp->rx_sge_prod = ring_prod;
1347                 }
1348         }
1349
1350         for_each_eth_queue(bp, j) {
1351                 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353                 fp->rx_bd_cons = 0;
1354
1355                 /* Activate BD ring */
1356                 /* Warning!
1357                  * this will generate an interrupt (to the TSTORM);
1358                  * it must only be done after the chip is initialized
1359                  */
1360                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361                                      fp->rx_sge_prod);
1362
1363                 if (j != 0)
1364                         continue;
1365
1366                 if (CHIP_IS_E1(bp)) {
1367                         REG_WR(bp, BAR_USTRORM_INTMEM +
1368                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1369                                U64_LO(fp->rx_comp_mapping));
1370                         REG_WR(bp, BAR_USTRORM_INTMEM +
1371                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1372                                U64_HI(fp->rx_comp_mapping));
1373                 }
1374         }
1375 }
1376
1377 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1378 {
1379         u8 cos;
1380         struct bnx2x *bp = fp->bp;
1381
1382         for_each_cos_in_tx_queue(fp, cos) {
1383                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1384                 unsigned pkts_compl = 0, bytes_compl = 0;
1385
1386                 u16 sw_prod = txdata->tx_pkt_prod;
1387                 u16 sw_cons = txdata->tx_pkt_cons;
1388
1389                 while (sw_cons != sw_prod) {
1390                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1391                                           &pkts_compl, &bytes_compl);
1392                         sw_cons++;
1393                 }
1394
1395                 netdev_tx_reset_queue(
1396                         netdev_get_tx_queue(bp->dev,
1397                                             txdata->txq_index));
1398         }
1399 }
1400
1401 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1402 {
1403         int i;
1404
1405         for_each_tx_queue_cnic(bp, i) {
1406                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1407         }
1408 }
1409
1410 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1411 {
1412         int i;
1413
1414         for_each_eth_queue(bp, i) {
1415                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1416         }
1417 }
1418
1419 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1420 {
1421         struct bnx2x *bp = fp->bp;
1422         int i;
1423
1424         /* ring wasn't allocated */
1425         if (fp->rx_buf_ring == NULL)
1426                 return;
1427
1428         for (i = 0; i < NUM_RX_BD; i++) {
1429                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1430                 u8 *data = rx_buf->data;
1431
1432                 if (data == NULL)
1433                         continue;
1434                 dma_unmap_single(&bp->pdev->dev,
1435                                  dma_unmap_addr(rx_buf, mapping),
1436                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438                 rx_buf->data = NULL;
1439                 bnx2x_frag_free(fp, data);
1440         }
1441 }
1442
1443 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1444 {
1445         int j;
1446
1447         for_each_rx_queue_cnic(bp, j) {
1448                 bnx2x_free_rx_bds(&bp->fp[j]);
1449         }
1450 }
1451
1452 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1453 {
1454         int j;
1455
1456         for_each_eth_queue(bp, j) {
1457                 struct bnx2x_fastpath *fp = &bp->fp[j];
1458
1459                 bnx2x_free_rx_bds(fp);
1460
1461                 if (!fp->disable_tpa)
1462                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1463         }
1464 }
1465
1466 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1467 {
1468         bnx2x_free_tx_skbs_cnic(bp);
1469         bnx2x_free_rx_skbs_cnic(bp);
1470 }
1471
1472 void bnx2x_free_skbs(struct bnx2x *bp)
1473 {
1474         bnx2x_free_tx_skbs(bp);
1475         bnx2x_free_rx_skbs(bp);
1476 }
1477
1478 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1479 {
1480         /* load old values */
1481         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1482
1483         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1484                 /* leave all but MAX value */
1485                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1486
1487                 /* set new MAX value */
1488                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1489                                 & FUNC_MF_CFG_MAX_BW_MASK;
1490
1491                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1492         }
1493 }
1494
1495 /**
1496  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1497  *
1498  * @bp:         driver handle
1499  * @nvecs:      number of vectors to be released
1500  */
1501 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1502 {
1503         int i, offset = 0;
1504
1505         if (nvecs == offset)
1506                 return;
1507
1508         /* VFs don't have a default SB */
1509         if (IS_PF(bp)) {
1510                 free_irq(bp->msix_table[offset].vector, bp->dev);
1511                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1512                    bp->msix_table[offset].vector);
1513                 offset++;
1514         }
1515
1516         if (CNIC_SUPPORT(bp)) {
1517                 if (nvecs == offset)
1518                         return;
1519                 offset++;
1520         }
1521
1522         for_each_eth_queue(bp, i) {
1523                 if (nvecs == offset)
1524                         return;
1525                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1526                    i, bp->msix_table[offset].vector);
1527
1528                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1529         }
1530 }
1531
1532 void bnx2x_free_irq(struct bnx2x *bp)
1533 {
1534         if (bp->flags & USING_MSIX_FLAG &&
1535             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1536                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1537
1538                 /* vfs don't have a default status block */
1539                 if (IS_PF(bp))
1540                         nvecs++;
1541
1542                 bnx2x_free_msix_irqs(bp, nvecs);
1543         } else {
1544                 free_irq(bp->dev->irq, bp->dev);
1545         }
1546 }
1547
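/* MSI-X vector table layout set up below and relied upon by
 * bnx2x_free_msix_irqs() and bnx2x_req_msix_irqs():
 *   entry 0            - slowpath/default status block (PF only)
 *   next entry         - CNIC, if supported
 *   remaining entries  - one per ETH fastpath queue
 */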
1548 int bnx2x_enable_msix(struct bnx2x *bp)
1549 {
1550         int msix_vec = 0, i, rc;
1551
1552         /* VFs don't have a default status block */
1553         if (IS_PF(bp)) {
1554                 bp->msix_table[msix_vec].entry = msix_vec;
1555                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1556                                bp->msix_table[0].entry);
1557                 msix_vec++;
1558         }
1559
1560         /* CNIC requires an MSI-X vector for itself */
1561         if (CNIC_SUPPORT(bp)) {
1562                 bp->msix_table[msix_vec].entry = msix_vec;
1563                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1564                                msix_vec, bp->msix_table[msix_vec].entry);
1565                 msix_vec++;
1566         }
1567
1568         /* We need separate vectors for ETH queues only (not FCoE) */
1569         for_each_eth_queue(bp, i) {
1570                 bp->msix_table[msix_vec].entry = msix_vec;
1571                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1572                                msix_vec, msix_vec, i);
1573                 msix_vec++;
1574         }
1575
1576         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1577            msix_vec);
1578
1579         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1580
1581         /*
1582          * reconfigure number of tx/rx queues according to available
1583          * MSI-X vectors
1584          */
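        /* pci_enable_msix() returns 0 when all requested vectors are granted,
         * a negative errno on error, or a positive count of vectors that
         * could be allocated instead.
         * Illustrative example: requesting msix_vec == 10 with only 8 vectors
         * available yields rc == 8, so diff == 2 ETH queues are dropped
         * before retrying below.
         */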
1585         if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1586                 /* how many fewer vectors will we have? */
1587                 int diff = msix_vec - rc;
1588
1589                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1590
1591                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1592
1593                 if (rc) {
1594                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1595                         goto no_msix;
1596                 }
1597                 /*
1598                  * decrease number of queues by number of unallocated entries
1599                  */
1600                 bp->num_ethernet_queues -= diff;
1601                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1602
1603                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1604                                bp->num_queues);
1605         } else if (rc > 0) {
1606                 /* Get by with single vector */
1607                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1608                 if (rc) {
1609                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1610                                        rc);
1611                         goto no_msix;
1612                 }
1613
1614                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1615                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1616
1617                 BNX2X_DEV_INFO("set number of queues to 1\n");
1618                 bp->num_ethernet_queues = 1;
1619                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1620         } else if (rc < 0) {
1621                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1622                 goto no_msix;
1623         }
1624
1625         bp->flags |= USING_MSIX_FLAG;
1626
1627         return 0;
1628
1629 no_msix:
1630         /* fall back to INTx if not enough memory */
1631         if (rc == -ENOMEM)
1632                 bp->flags |= DISABLE_MSI_FLAG;
1633
1634         return rc;
1635 }
1636
1637 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1638 {
1639         int i, rc, offset = 0;
1640
1641         /* no default status block for vf */
1642         if (IS_PF(bp)) {
1643                 rc = request_irq(bp->msix_table[offset++].vector,
1644                                  bnx2x_msix_sp_int, 0,
1645                                  bp->dev->name, bp->dev);
1646                 if (rc) {
1647                         BNX2X_ERR("request sp irq failed\n");
1648                         return -EBUSY;
1649                 }
1650         }
1651
1652         if (CNIC_SUPPORT(bp))
1653                 offset++;
1654
1655         for_each_eth_queue(bp, i) {
1656                 struct bnx2x_fastpath *fp = &bp->fp[i];
1657                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1658                          bp->dev->name, i);
1659
1660                 rc = request_irq(bp->msix_table[offset].vector,
1661                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1662                 if (rc) {
1663                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1664                               bp->msix_table[offset].vector, rc);
1665                         bnx2x_free_msix_irqs(bp, offset);
1666                         return -EBUSY;
1667                 }
1668
1669                 offset++;
1670         }
1671
1672         i = BNX2X_NUM_ETH_QUEUES(bp);
1673         if (IS_PF(bp)) {
1674                 offset = 1 + CNIC_SUPPORT(bp);
1675                 netdev_info(bp->dev,
1676                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1677                             bp->msix_table[0].vector,
1678                             0, bp->msix_table[offset].vector,
1679                             i - 1, bp->msix_table[offset + i - 1].vector);
1680         } else {
1681                 offset = CNIC_SUPPORT(bp);
1682                 netdev_info(bp->dev,
1683                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1684                             0, bp->msix_table[offset].vector,
1685                             i - 1, bp->msix_table[offset + i - 1].vector);
1686         }
1687         return 0;
1688 }
1689
1690 int bnx2x_enable_msi(struct bnx2x *bp)
1691 {
1692         int rc;
1693
1694         rc = pci_enable_msi(bp->pdev);
1695         if (rc) {
1696                 BNX2X_DEV_INFO("MSI is not attainable\n");
1697                 return -1;
1698         }
1699         bp->flags |= USING_MSI_FLAG;
1700
1701         return 0;
1702 }
1703
1704 static int bnx2x_req_irq(struct bnx2x *bp)
1705 {
1706         unsigned long flags;
1707         unsigned int irq;
1708
1709         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1710                 flags = 0;
1711         else
1712                 flags = IRQF_SHARED;
1713
1714         if (bp->flags & USING_MSIX_FLAG)
1715                 irq = bp->msix_table[0].vector;
1716         else
1717                 irq = bp->pdev->irq;
1718
1719         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1720 }
1721
1722 static int bnx2x_setup_irqs(struct bnx2x *bp)
1723 {
1724         int rc = 0;
1725         if (bp->flags & USING_MSIX_FLAG &&
1726             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1727                 rc = bnx2x_req_msix_irqs(bp);
1728                 if (rc)
1729                         return rc;
1730         } else {
1731                 rc = bnx2x_req_irq(bp);
1732                 if (rc) {
1733                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1734                         return rc;
1735                 }
1736                 if (bp->flags & USING_MSI_FLAG) {
1737                         bp->dev->irq = bp->pdev->irq;
1738                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1739                                     bp->dev->irq);
1740                 }
1741                 if (bp->flags & USING_MSIX_FLAG) {
1742                         bp->dev->irq = bp->msix_table[0].vector;
1743                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1744                                     bp->dev->irq);
1745                 }
1746         }
1747
1748         return 0;
1749 }
1750
1751 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1752 {
1753         int i;
1754
1755         for_each_rx_queue_cnic(bp, i)
1756                 napi_enable(&bnx2x_fp(bp, i, napi));
1757 }
1758
1759 static void bnx2x_napi_enable(struct bnx2x *bp)
1760 {
1761         int i;
1762
1763         for_each_eth_queue(bp, i)
1764                 napi_enable(&bnx2x_fp(bp, i, napi));
1765 }
1766
1767 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1768 {
1769         int i;
1770
1771         for_each_rx_queue_cnic(bp, i)
1772                 napi_disable(&bnx2x_fp(bp, i, napi));
1773 }
1774
1775 static void bnx2x_napi_disable(struct bnx2x *bp)
1776 {
1777         int i;
1778
1779         for_each_eth_queue(bp, i)
1780                 napi_disable(&bnx2x_fp(bp, i, napi));
1781 }
1782
1783 void bnx2x_netif_start(struct bnx2x *bp)
1784 {
1785         if (netif_running(bp->dev)) {
1786                 bnx2x_napi_enable(bp);
1787                 if (CNIC_LOADED(bp))
1788                         bnx2x_napi_enable_cnic(bp);
1789                 bnx2x_int_enable(bp);
1790                 if (bp->state == BNX2X_STATE_OPEN)
1791                         netif_tx_wake_all_queues(bp->dev);
1792         }
1793 }
1794
1795 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1796 {
1797         bnx2x_int_disable_sync(bp, disable_hw);
1798         bnx2x_napi_disable(bp);
1799         if (CNIC_LOADED(bp))
1800                 bnx2x_napi_disable_cnic(bp);
1801 }
1802
1803 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1804 {
1805         struct bnx2x *bp = netdev_priv(dev);
1806
1807         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1808                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1809                 u16 ether_type = ntohs(hdr->h_proto);
1810
1811                 /* Skip VLAN tag if present */
1812                 if (ether_type == ETH_P_8021Q) {
1813                         struct vlan_ethhdr *vhdr =
1814                                 (struct vlan_ethhdr *)skb->data;
1815
1816                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1817                 }
1818
1819                 /* If ethertype is FCoE or FIP - use FCoE ring */
1820                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1821                         return bnx2x_fcoe_tx(bp, txq_index);
1822         }
1823
1824         /* select a non-FCoE queue */
1825         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1826 }
1827
1828 void bnx2x_set_num_queues(struct bnx2x *bp)
1829 {
1830         /* RSS queues */
1831         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1832
1833         /* override in STORAGE SD modes */
1834         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1835                 bp->num_ethernet_queues = 1;
1836
1837         /* Add special queues */
1838         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1839         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1840
1841         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1842 }
1843
1844 /**
1845  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1846  *
1847  * @bp:         Driver handle
1848  *
1849  * We currently support at most 16 Tx queues for each CoS, thus we will
1850  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1851  * bp->max_cos.
1852  *
1853  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1854  * index after all ETH L2 indices.
1855  *
1856  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1857  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1858  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1859  *
1860  * The proper configuration of skb->queue_mapping is handled by
1861  * bnx2x_select_queue() and __skb_tx_hash().
1862  *
1863  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1864  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1865  */
1866 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1867 {
1868         int rc, tx, rx;
1869
1870         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1871         rx = BNX2X_NUM_ETH_QUEUES(bp);
1872
1873 /* account for fcoe queue */
1874         if (include_cnic && !NO_FCOE(bp)) {
1875                 rx++;
1876                 tx++;
1877         }
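        /* Illustrative example: with 8 ETH queues and max_cos == 3 this asks
         * for tx == 24 and rx == 8; an FCoE L2 queue adds one to each.
         */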
1878
1879         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1880         if (rc) {
1881                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1882                 return rc;
1883         }
1884         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1885         if (rc) {
1886                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1887                 return rc;
1888         }
1889
1890         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1891                           tx, rx);
1892
1893         return rc;
1894 }
1895
1896 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1897 {
1898         int i;
1899
1900         for_each_queue(bp, i) {
1901                 struct bnx2x_fastpath *fp = &bp->fp[i];
1902                 u32 mtu;
1903
1904                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1905                 if (IS_FCOE_IDX(i))
1906                         /*
1907                          * Although there are no IP frames expected to arrive on
1908                          * this ring, we still want to add an
1909                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1910                          * overrun attack.
1911                          */
1912                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1913                 else
1914                         mtu = bp->dev->mtu;
1915                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1916                                   IP_HEADER_ALIGNMENT_PADDING +
1917                                   ETH_OVREHEAD +
1918                                   mtu +
1919                                   BNX2X_FW_RX_ALIGN_END;
1920                 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
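                /* A non-zero rx_frag_size means the buffer plus NET_SKB_PAD
                 * fits in a single page; rx_frag_size == 0 is taken by the
                 * frag allocation helper elsewhere in this file as a cue to
                 * fall back to kmalloc() instead of the page-fragment
                 * allocator.
                 */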
1921                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1922                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1923                 else
1924                         fp->rx_frag_size = 0;
1925         }
1926 }
1927
1928 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1929 {
1930         int i;
1931         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1932
1933         /* Prepare the initial contents of the indirection table if RSS is
1934          * enabled
1935          */
1936         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1937                 bp->rss_conf_obj.ind_table[i] =
1938                         bp->fp->cl_id +
1939                         ethtool_rxfh_indir_default(i, num_eth_queues);
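        /* ethtool_rxfh_indir_default() spreads entries round-robin, so this
         * is effectively ind_table[i] = leading cl_id + (i % num_eth_queues).
         */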
1940
1941         /*
1942          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1943          * per-port, so if explicit configuration is needed, do it only
1944          * for a PMF.
1945          *
1946          * For 57712 and newer on the other hand it's a per-function
1947          * configuration.
1948          */
1949         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1950 }
1951
1952 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1953                         bool config_hash)
1954 {
1955         struct bnx2x_config_rss_params params = {NULL};
1956
1957         /* Although RSS is meaningless when there is a single HW queue we
1958          * still need it enabled in order to have HW Rx hash generated.
1959          *
1960          * if (!is_eth_multi(bp))
1961          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1962          */
1963
1964         params.rss_obj = rss_obj;
1965
1966         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1967
1968         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1969
1970         /* RSS configuration */
1971         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1972         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1973         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1974         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1975         if (rss_obj->udp_rss_v4)
1976                 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1977         if (rss_obj->udp_rss_v6)
1978                 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1979
1980         /* Hash bits */
1981         params.rss_result_mask = MULTI_MASK;
1982
1983         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1984
1985         if (config_hash) {
1986                 /* RSS keys */
1987                 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1988                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1989         }
1990
1991         return bnx2x_config_rss(bp, &params);
1992 }
1993
1994 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1995 {
1996         struct bnx2x_func_state_params func_params = {NULL};
1997
1998         /* Prepare parameters for function state transitions */
1999         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2000
2001         func_params.f_obj = &bp->func_obj;
2002         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2003
2004         func_params.params.hw_init.load_phase = load_code;
2005
2006         return bnx2x_func_state_change(bp, &func_params);
2007 }
2008
2009 /*
2010  * Cleans the objects that have internal lists without sending
2011  * ramrods. Should be run when interrupts are disabled.
2012  */
2013 static void bnx2x_squeeze_objects(struct bnx2x *bp)
2014 {
2015         int rc;
2016         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2017         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2018         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2019
2020         /***************** Cleanup MACs' object first *************************/
2021
2022         /* Wait for completion of requested commands */
2023         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2024         /* Perform a dry cleanup */
2025         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2026
2027         /* Clean ETH primary MAC */
2028         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2029         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2030                                  &ramrod_flags);
2031         if (rc != 0)
2032                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2033
2034         /* Cleanup UC list */
2035         vlan_mac_flags = 0;
2036         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2037         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2038                                  &ramrod_flags);
2039         if (rc != 0)
2040                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2041
2042         /***************** Now clean mcast object *****************************/
2043         rparam.mcast_obj = &bp->mcast_obj;
2044         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2045
2046         /* Add a DEL command... */
2047         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2048         if (rc < 0)
2049                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2050                           rc);
2051
2052         /* ...and wait until all pending commands are cleared */
2053         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2054         while (rc != 0) {
2055                 if (rc < 0) {
2056                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2057                                   rc);
2058                         return;
2059                 }
2060
2061                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2062         }
2063 }
2064
2065 #ifndef BNX2X_STOP_ON_ERROR
2066 #define LOAD_ERROR_EXIT(bp, label) \
2067         do { \
2068                 (bp)->state = BNX2X_STATE_ERROR; \
2069                 goto label; \
2070         } while (0)
2071
2072 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2073         do { \
2074                 bp->cnic_loaded = false; \
2075                 goto label; \
2076         } while (0)
2077 #else /*BNX2X_STOP_ON_ERROR*/
2078 #define LOAD_ERROR_EXIT(bp, label) \
2079         do { \
2080                 (bp)->state = BNX2X_STATE_ERROR; \
2081                 (bp)->panic = 1; \
2082                 return -EBUSY; \
2083         } while (0)
2084 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2085         do { \
2086                 bp->cnic_loaded = false; \
2087                 (bp)->panic = 1; \
2088                 return -EBUSY; \
2089         } while (0)
2090 #endif /*BNX2X_STOP_ON_ERROR*/
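/* Without BNX2X_STOP_ON_ERROR these macros jump to the per-stage cleanup
 * labels in bnx2x_nic_load()/bnx2x_load_cnic(); with it they set the panic
 * flag and return -EBUSY immediately, skipping cleanup.
 */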
2091
2092 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2093 {
2094         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2095                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2096         return;
2097 }
2098
2099 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2100 {
2101         int num_groups, vf_headroom = 0;
2102         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2103
2104         /* number of queues for statistics is number of eth queues + FCoE */
2105         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2106
2107         /* Total number of FW statistics requests =
2108          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2109          * and fcoe l2 queue) stats + num of queues (which includes another 1
2110          * for fcoe l2 queue if applicable)
2111          */
2112         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2113
2114         /* vf stats appear in the request list, but their data is allocated by
2115          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2116          * it is used to determine where to place the vf stats queries in the
2117          * request struct
2118          */
2119         if (IS_SRIOV(bp))
2120                 vf_headroom = bnx2x_vf_headroom(bp);
2121
2122         /* Request is built from stats_query_header and an array of
2123          * stats_query_cmd_group each of which contains
2124          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2125          * configured in the stats_query_header.
2126          */
2127         num_groups =
2128                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2129                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2130                  1 : 0));
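        /* i.e. a ceiling division, equivalent to
         * DIV_ROUND_UP(fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT).
         * Illustrative example: 19 requests with a group size of 16 need
         * two command groups.
         */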
2131
2132         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2133            bp->fw_stats_num, vf_headroom, num_groups);
2134         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2135                 num_groups * sizeof(struct stats_query_cmd_group);
2136
2137         /* Data for statistics requests + stats_counter
2138          * stats_counter holds per-STORM counters that are incremented
2139          * when STORM has finished with the current request.
2140          * Memory for FCoE offloaded statistics is counted anyway,
2141          * even if they will not be sent.
2142          * VF stats are not accounted for here as the data of VF stats is stored
2143          * in memory allocated by the VF, not here.
2144          */
2145         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2146                 sizeof(struct per_pf_stats) +
2147                 sizeof(struct fcoe_statistics_params) +
2148                 sizeof(struct per_queue_stats) * num_queue_stats +
2149                 sizeof(struct stats_counter);
2150
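        /* The request and the data area share a single DMA allocation
         * (BNX2X_PCI_ALLOC): the request comes first and the data block
         * starts fw_stats_req_sz bytes into it (see the shortcut pointers
         * below).
         */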
2151         BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2152                         bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2153
2154         /* Set shortcuts */
2155         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2156         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2157         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2158                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2159         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2160                 bp->fw_stats_req_sz;
2161
2162         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2163            U64_HI(bp->fw_stats_req_mapping),
2164            U64_LO(bp->fw_stats_req_mapping));
2165         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2166            U64_HI(bp->fw_stats_data_mapping),
2167            U64_LO(bp->fw_stats_data_mapping));
2168         return 0;
2169
2170 alloc_mem_err:
2171         bnx2x_free_fw_stats_mem(bp);
2172         BNX2X_ERR("Can't allocate FW stats memory\n");
2173         return -ENOMEM;
2174 }
2175
2176 /* send load request to mcp and analyze response */
2177 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2178 {
2179         /* init fw_seq */
2180         bp->fw_seq =
2181                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2182                  DRV_MSG_SEQ_NUMBER_MASK);
2183         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2184
2185         /* Get current FW pulse sequence */
2186         bp->fw_drv_pulse_wr_seq =
2187                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2188                  DRV_PULSE_SEQ_MASK);
2189         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2190
2191         /* load request */
2192         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2193                                         DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2194
2195         /* if mcp fails to respond we must abort */
2196         if (!(*load_code)) {
2197                 BNX2X_ERR("MCP response failure, aborting\n");
2198                 return -EBUSY;
2199         }
2200
2201         /* If mcp refused (e.g. other port is in diagnostic mode) we
2202          * must abort
2203          */
2204         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2205                 BNX2X_ERR("MCP refused load request, aborting\n");
2206                 return -EBUSY;
2207         }
2208         return 0;
2209 }
2210
2211 /* check whether another PF has already loaded FW to the chip. In
2212  * virtualized environments a PF from another VM may have already
2213  * initialized the device, including loading FW.
2214  */
2215 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2216 {
2217         /* is another pf loaded on this engine? */
2218         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2219             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2220                 /* build my FW version dword */
2221                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2222                         (BCM_5710_FW_MINOR_VERSION << 8) +
2223                         (BCM_5710_FW_REVISION_VERSION << 16) +
2224                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2225
2226                 /* read loaded FW from chip */
2227                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2228
2229                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2230                    loaded_fw, my_fw);
2231
2232                 /* abort nic load if version mismatch */
2233                 if (my_fw != loaded_fw) {
2234                         BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2235                                   loaded_fw, my_fw);
2236                         return -EBUSY;
2237                 }
2238         }
2239         return 0;
2240 }
2241
2242 /* returns the "mcp load_code" according to global load_count array */
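/* load_count[path][0] counts functions loaded on this path and
 * load_count[path][1 + port] those on this port: the first function on the
 * path gets LOAD_COMMON, the first on the port LOAD_PORT and everyone else
 * LOAD_FUNCTION.
 */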
2243 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2244 {
2245         int path = BP_PATH(bp);
2246
2247         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2248            path, load_count[path][0], load_count[path][1],
2249            load_count[path][2]);
2250         load_count[path][0]++;
2251         load_count[path][1 + port]++;
2252         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2253            path, load_count[path][0], load_count[path][1],
2254            load_count[path][2]);
2255         if (load_count[path][0] == 1)
2256                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2257         else if (load_count[path][1 + port] == 1)
2258                 return FW_MSG_CODE_DRV_LOAD_PORT;
2259         else
2260                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2261 }
2262
2263 /* mark PMF if applicable */
2264 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2265 {
2266         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2267             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2268             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2269                 bp->port.pmf = 1;
2270                 /* We need the barrier to ensure the ordering between the
2271                  * writing to bp->port.pmf here and reading it from the
2272                  * bnx2x_periodic_task().
2273                  */
2274                 smp_mb();
2275         } else {
2276                 bp->port.pmf = 0;
2277         }
2278
2279         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2280 }
2281
2282 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2283 {
2284         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2285              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2286             (bp->common.shmem2_base)) {
2287                 if (SHMEM2_HAS(bp, dcc_support))
2288                         SHMEM2_WR(bp, dcc_support,
2289                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2290                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2291                 if (SHMEM2_HAS(bp, afex_driver_support))
2292                         SHMEM2_WR(bp, afex_driver_support,
2293                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2294         }
2295
2296         /* Set AFEX default VLAN tag to an invalid value */
2297         bp->afex_def_vlan_tag = -1;
2298 }
2299
2300 /**
2301  * bnx2x_bz_fp - zero content of the fastpath structure.
2302  *
2303  * @bp:         driver handle
2304  * @index:      fastpath index to be zeroed
2305  *
2306  * Makes sure the contents of the bp->fp[index].napi is kept
2307  * intact.
2308  */
2309 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2310 {
2311         struct bnx2x_fastpath *fp = &bp->fp[index];
2312
2313         int cos;
2314         struct napi_struct orig_napi = fp->napi;
2315         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2316         /* bzero bnx2x_fastpath contents */
2317         if (fp->tpa_info)
2318                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2319                        sizeof(struct bnx2x_agg_info));
2320         memset(fp, 0, sizeof(*fp));
2321
2322         /* Restore the NAPI object as it has been already initialized */
2323         fp->napi = orig_napi;
2324         fp->tpa_info = orig_tpa_info;
2325         fp->bp = bp;
2326         fp->index = index;
2327         if (IS_ETH_FP(fp))
2328                 fp->max_cos = bp->max_cos;
2329         else
2330                 /* Special queues support only one CoS */
2331                 fp->max_cos = 1;
2332
2333         /* Init txdata pointers */
2334         if (IS_FCOE_FP(fp))
2335                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2336         if (IS_ETH_FP(fp))
2337                 for_each_cos_in_tx_queue(fp, cos)
2338                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2339                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
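        /* i.e. bnx2x_txq is laid out CoS-major: all CoS-0 ETH txdata entries
         * first, then all CoS-1, and so on, with the FCoE txdata placed
         * after the ETH entries (FCOE_TXQ_IDX).
         */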
2340
2341         /*
2342          * Set the TPA flag for each queue. The TPA flag determines the queue's
2343          * minimal size, so it must be set prior to queue memory allocation.
2344          */
2345         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2346                                   (bp->flags & GRO_ENABLE_FLAG &&
2347                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2348         if (bp->flags & TPA_ENABLE_FLAG)
2349                 fp->mode = TPA_MODE_LRO;
2350         else if (bp->flags & GRO_ENABLE_FLAG)
2351                 fp->mode = TPA_MODE_GRO;
2352
2353         /* We don't want TPA on an FCoE L2 ring */
2354         if (IS_FCOE_FP(fp))
2355                 fp->disable_tpa = 1;
2356 }
2357
2358 int bnx2x_load_cnic(struct bnx2x *bp)
2359 {
2360         int i, rc, port = BP_PORT(bp);
2361
2362         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2363
2364         mutex_init(&bp->cnic_mutex);
2365
2366         if (IS_PF(bp)) {
2367                 rc = bnx2x_alloc_mem_cnic(bp);
2368                 if (rc) {
2369                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2370                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2371                 }
2372         }
2373
2374         rc = bnx2x_alloc_fp_mem_cnic(bp);
2375         if (rc) {
2376                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2377                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2378         }
2379
2380         /* Update the number of queues with the cnic queues */
2381         rc = bnx2x_set_real_num_queues(bp, 1);
2382         if (rc) {
2383                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2384                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2385         }
2386
2387         /* Add all CNIC NAPI objects */
2388         bnx2x_add_all_napi_cnic(bp);
2389         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2390         bnx2x_napi_enable_cnic(bp);
2391
2392         rc = bnx2x_init_hw_func_cnic(bp);
2393         if (rc)
2394                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2395
2396         bnx2x_nic_init_cnic(bp);
2397
2398         if (IS_PF(bp)) {
2399                 /* Enable Timer scan */
2400                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2401
2402                 /* setup cnic queues */
2403                 for_each_cnic_queue(bp, i) {
2404                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2405                         if (rc) {
2406                                 BNX2X_ERR("Queue setup failed\n");
2407                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2408                         }
2409                 }
2410         }
2411
2412         /* Initialize Rx filter. */
2413         netif_addr_lock_bh(bp->dev);
2414         bnx2x_set_rx_mode(bp->dev);
2415         netif_addr_unlock_bh(bp->dev);
2416
2417         /* re-read iscsi info */
2418         bnx2x_get_iscsi_info(bp);
2419         bnx2x_setup_cnic_irq_info(bp);
2420         bnx2x_setup_cnic_info(bp);
2421         bp->cnic_loaded = true;
2422         if (bp->state == BNX2X_STATE_OPEN)
2423                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2424
2425
2426         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2427
2428         return 0;
2429
2430 #ifndef BNX2X_STOP_ON_ERROR
2431 load_error_cnic2:
2432         /* Disable Timer scan */
2433         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2434
2435 load_error_cnic1:
2436         bnx2x_napi_disable_cnic(bp);
2437         /* Update the number of queues without the cnic queues */
2438         rc = bnx2x_set_real_num_queues(bp, 0);
2439         if (rc)
2440                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2441 load_error_cnic0:
2442         BNX2X_ERR("CNIC-related load failed\n");
2443         bnx2x_free_fp_mem_cnic(bp);
2444         bnx2x_free_mem_cnic(bp);
2445         return rc;
2446 #endif /* ! BNX2X_STOP_ON_ERROR */
2447 }
2448
2449 /* must be called with rtnl_lock */
2450 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2451 {
2452         int port = BP_PORT(bp);
2453         int i, rc = 0, load_code = 0;
2454
2455         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2456         DP(NETIF_MSG_IFUP,
2457            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2458
2459 #ifdef BNX2X_STOP_ON_ERROR
2460         if (unlikely(bp->panic)) {
2461                 BNX2X_ERR("Can't load NIC when there is panic\n");
2462                 return -EPERM;
2463         }
2464 #endif
2465
2466         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2467
2468         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2469         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2470                 &bp->last_reported_link.link_report_flags);
2471
2472         if (IS_PF(bp))
2473                 /* must be called before memory allocation and HW init */
2474                 bnx2x_ilt_set_info(bp);
2475
2476         /*
2477          * Zero fastpath structures while preserving what must survive: the
2478          * napi struct (allocated only once), the fp index, max_cos and the
2479          * bp pointer. Also set fp->disable_tpa and txdata_ptr.
2480          */
2481         DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2482         for_each_queue(bp, i)
2483                 bnx2x_bz_fp(bp, i);
2484         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2485                                   bp->num_cnic_queues) *
2486                                   sizeof(struct bnx2x_fp_txdata));
2487
2488         bp->fcoe_init = false;
2489
2490         /* Set the receive queues buffer size */
2491         bnx2x_set_rx_buf_size(bp);
2492
2493         if (IS_PF(bp)) {
2494                 rc = bnx2x_alloc_mem(bp);
2495                 if (rc) {
2496                         BNX2X_ERR("Unable to allocate bp memory\n");
2497                         return rc;
2498                 }
2499         }
2500
2501         /* Allocate memory for FW statistics */
2502         if (bnx2x_alloc_fw_stats_mem(bp))
2503                 LOAD_ERROR_EXIT(bp, load_error0);
2504
2505         /* needs to be done after alloc mem, since it's self-adjusting to the
2506          * amount of memory available for RSS queues
2507          */
2508         rc = bnx2x_alloc_fp_mem(bp);
2509         if (rc) {
2510                 BNX2X_ERR("Unable to allocate memory for fps\n");
2511                 LOAD_ERROR_EXIT(bp, load_error0);
2512         }
2513
2514         /* request pf to initialize status blocks */
2515         if (IS_VF(bp)) {
2516                 rc = bnx2x_vfpf_init(bp);
2517                 if (rc)
2518                         LOAD_ERROR_EXIT(bp, load_error0);
2519         }
2520
2521         /* Since bnx2x_alloc_mem() may update
2522          * bp->num_queues, bnx2x_set_real_num_queues() should always
2523          * come after it. At this stage cnic queues are not counted.
2524          */
2525         rc = bnx2x_set_real_num_queues(bp, 0);
2526         if (rc) {
2527                 BNX2X_ERR("Unable to set real_num_queues\n");
2528                 LOAD_ERROR_EXIT(bp, load_error0);
2529         }
2530
2531         /* Configure multi-CoS mappings in the kernel.
2532          * This configuration may be overridden by a multi-class queue discipline
2533          * or by a dcbx negotiation result.
2534          */
2535         bnx2x_setup_tc(bp->dev, bp->max_cos);
2536
2537         /* Add all NAPI objects */
2538         bnx2x_add_all_napi(bp);
2539         DP(NETIF_MSG_IFUP, "napi added\n");
2540         bnx2x_napi_enable(bp);
2541
2542         if (IS_PF(bp)) {
2543                 /* set pf load just before approaching the MCP */
2544                 bnx2x_set_pf_load(bp);
2545
2546                 /* if mcp exists send load request and analyze response */
2547                 if (!BP_NOMCP(bp)) {
2548                         /* attempt to load pf */
2549                         rc = bnx2x_nic_load_request(bp, &load_code);
2550                         if (rc)
2551                                 LOAD_ERROR_EXIT(bp, load_error1);
2552
2553                         /* what did mcp say? */
2554                         rc = bnx2x_nic_load_analyze_req(bp, load_code);
2555                         if (rc) {
2556                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2557                                 LOAD_ERROR_EXIT(bp, load_error2);
2558                         }
2559                 } else {
2560                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2561                 }
2562
2563                 /* mark pmf if applicable */
2564                 bnx2x_nic_load_pmf(bp, load_code);
2565
2566                 /* Init Function state controlling object */
2567                 bnx2x__init_func_obj(bp);
2568
2569                 /* Initialize HW */
2570                 rc = bnx2x_init_hw(bp, load_code);
2571                 if (rc) {
2572                         BNX2X_ERR("HW init failed, aborting\n");
2573                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2574                         LOAD_ERROR_EXIT(bp, load_error2);
2575                 }
2576         }
2577
2578         /* Connect to IRQs */
2579         rc = bnx2x_setup_irqs(bp);
2580         if (rc) {
2581                 BNX2X_ERR("setup irqs failed\n");
2582                 if (IS_PF(bp))
2583                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2584                 LOAD_ERROR_EXIT(bp, load_error2);
2585         }
2586
2587         /* Setup NIC internals and enable interrupts */
2588         bnx2x_nic_init(bp, load_code);
2589
2590         /* Init per-function objects */
2591         if (IS_PF(bp)) {
2592                 bnx2x_init_bp_objs(bp);
2593                 bnx2x_iov_nic_init(bp);
2594
2595                 /* Set AFEX default VLAN tag to an invalid value */
2596                 bp->afex_def_vlan_tag = -1;
2597                 bnx2x_nic_load_afex_dcc(bp, load_code);
2598                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2599                 rc = bnx2x_func_start(bp);
2600                 if (rc) {
2601                         BNX2X_ERR("Function start failed!\n");
2602                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2603
2604                         LOAD_ERROR_EXIT(bp, load_error3);
2605                 }
2606
2607                 /* Send LOAD_DONE command to MCP */
2608                 if (!BP_NOMCP(bp)) {
2609                         load_code = bnx2x_fw_command(bp,
2610                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2611                         if (!load_code) {
2612                                 BNX2X_ERR("MCP response failure, aborting\n");
2613                                 rc = -EBUSY;
2614                                 LOAD_ERROR_EXIT(bp, load_error3);
2615                         }
2616                 }
2617
2618                 /* setup the leading queue */
2619                 rc = bnx2x_setup_leading(bp);
2620                 if (rc) {
2621                         BNX2X_ERR("Setup leading failed!\n");
2622                         LOAD_ERROR_EXIT(bp, load_error3);
2623                 }
2624
2625                 /* set up the rest of the queues */
2626                 for_each_nondefault_eth_queue(bp, i) {
2627                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2628                         if (rc) {
2629                                 BNX2X_ERR("Queue setup failed\n");
2630                                 LOAD_ERROR_EXIT(bp, load_error3);
2631                         }
2632                 }
2633
2634                 /* setup rss */
2635                 rc = bnx2x_init_rss_pf(bp);
2636                 if (rc) {
2637                         BNX2X_ERR("PF RSS init failed\n");
2638                         LOAD_ERROR_EXIT(bp, load_error3);
2639                 }
2640
2641         } else { /* vf */
2642                 for_each_eth_queue(bp, i) {
2643                         rc = bnx2x_vfpf_setup_q(bp, i);
2644                         if (rc) {
2645                                 BNX2X_ERR("Queue setup failed\n");
2646                                 LOAD_ERROR_EXIT(bp, load_error3);
2647                         }
2648                 }
2649         }
2650
2651         /* Now that clients are configured we are ready to work */
2652         bp->state = BNX2X_STATE_OPEN;
2653
2654         /* Configure a ucast MAC */
2655         if (IS_PF(bp))
2656                 rc = bnx2x_set_eth_mac(bp, true);
2657         else /* vf */
2658                 rc = bnx2x_vfpf_set_mac(bp);
2659         if (rc) {
2660                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2661                 LOAD_ERROR_EXIT(bp, load_error3);
2662         }
2663
2664         if (IS_PF(bp) && bp->pending_max) {
2665                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2666                 bp->pending_max = 0;
2667         }
2668
2669         if (bp->port.pmf) {
2670                 rc = bnx2x_initial_phy_init(bp, load_mode);
2671                 if (rc)
2672                         LOAD_ERROR_EXIT(bp, load_error3);
2673         }
2674         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2675
2676         /* Start fast path */
2677
2678         /* Initialize Rx filter. */
2679         netif_addr_lock_bh(bp->dev);
2680         bnx2x_set_rx_mode(bp->dev);
2681         netif_addr_unlock_bh(bp->dev);
2682
2683         /* Start the Tx */
2684         switch (load_mode) {
2685         case LOAD_NORMAL:
2686                 /* Tx queues should only be re-enabled */
2687                 netif_tx_wake_all_queues(bp->dev);
2688                 break;
2689
2690         case LOAD_OPEN:
2691                 netif_tx_start_all_queues(bp->dev);
2692                 smp_mb__after_clear_bit();
2693                 break;
2694
2695         case LOAD_DIAG:
2696         case LOAD_LOOPBACK_EXT:
2697                 bp->state = BNX2X_STATE_DIAG;
2698                 break;
2699
2700         default:
2701                 break;
2702         }
2703
2704         if (bp->port.pmf)
2705                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2706         else
2707                 bnx2x__link_status_update(bp);
2708
2709         /* start the timer */
2710         mod_timer(&bp->timer, jiffies + bp->current_interval);
2711
2712         if (CNIC_ENABLED(bp))
2713                 bnx2x_load_cnic(bp);
2714
2715         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2716                 /* mark driver is loaded in shmem2 */
2717                 u32 val;
2718                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2719                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2720                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2721                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2722         }
2723
2724         /* Wait for all pending SP commands to complete */
2725         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2726                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2727                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2728                 return -EBUSY;
2729         }
2730
2731         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2732         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2733                 bnx2x_dcbx_init(bp, false);
2734
2735         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2736
2737         return 0;
2738
2739 #ifndef BNX2X_STOP_ON_ERROR
2740 load_error3:
2741         if (IS_PF(bp)) {
2742                 bnx2x_int_disable_sync(bp, 1);
2743
2744                 /* Clean queueable objects */
2745                 bnx2x_squeeze_objects(bp);
2746         }
2747
2748         /* Free SKBs, SGEs, TPA pool and driver internals */
2749         bnx2x_free_skbs(bp);
2750         for_each_rx_queue(bp, i)
2751                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2752
2753         /* Release IRQs */
2754         bnx2x_free_irq(bp);
2755 load_error2:
2756         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2757                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2758                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2759         }
2760
2761         bp->port.pmf = 0;
2762 load_error1:
2763         bnx2x_napi_disable(bp);
2764         bnx2x_del_all_napi(bp);
2765
2766         /* clear pf_load status, as it was already set */
2767         if (IS_PF(bp))
2768                 bnx2x_clear_pf_load(bp);
2769 load_error0:
2770         bnx2x_free_fp_mem(bp);
2771         bnx2x_free_fw_stats_mem(bp);
2772         bnx2x_free_mem(bp);
2773
2774         return rc;
2775 #endif /* ! BNX2X_STOP_ON_ERROR */
2776 }
2777
2778 static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2779 {
2780         u8 rc = 0, cos, i;
2781
2782         /* Wait until tx fastpath tasks complete */
2783         for_each_tx_queue(bp, i) {
2784                 struct bnx2x_fastpath *fp = &bp->fp[i];
2785
2786                 for_each_cos_in_tx_queue(fp, cos)
2787                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2788                 if (rc)
2789                         return rc;
2790         }
2791         return 0;
2792 }
2793
2794 /* must be called with rtnl_lock */
2795 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2796 {
2797         int i;
2798         bool global = false;
2799
2800         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2801
2802         /* mark driver is unloaded in shmem2 */
2803         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2804                 u32 val;
2805                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2806                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2807                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2808         }
2809
2810         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2811             (bp->state == BNX2X_STATE_CLOSED ||
2812              bp->state == BNX2X_STATE_ERROR)) {
2813                 /* We can get here if the driver has been unloaded
2814                  * during parity error recovery and is either waiting for a
2815                  * leader to complete or for other functions to unload and
2816                  * then ifdown has been issued. In this case we want to
2817                  * unload and let other functions complete a recovery
2818                  * process.
2819                  */
2820                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2821                 bp->is_leader = 0;
2822                 bnx2x_release_leader_lock(bp);
2823                 smp_mb();
2824
2825                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2826                 BNX2X_ERR("Can't unload in closed or error state\n");
2827                 return -EINVAL;
2828         }
2829
2830         /* Nothing to do during unload if the previous bnx2x_nic_load()
2831          * has not completed successfully - all resources are released.
2832          *
2833          * We can get here only after an unsuccessful ndo_* callback, during
2834          * which the dev->IFF_UP flag is still on.
2835          */
2836         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2837                 return 0;
2838
2839         /* It's important to set bp->state to a value different from
2840          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2841          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2842          */
2843         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2844         smp_mb();
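        /* This is intended to pair with the bp->state check in bnx2x_tx_int(),
         * which re-wakes a stopped queue only while the state is
         * BNX2X_STATE_OPEN.
         */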
2845
2846         if (CNIC_LOADED(bp))
2847                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2848
2849         /* Stop Tx */
2850         bnx2x_tx_disable(bp);
2851         netdev_reset_tc(bp->dev);
2852
2853         bp->rx_mode = BNX2X_RX_MODE_NONE;
2854
2855         del_timer_sync(&bp->timer);
2856
2857         if (IS_PF(bp)) {
2858                 /* Set ALWAYS_ALIVE bit in shmem */
2859                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2860                 bnx2x_drv_pulse(bp);
2861                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2862                 bnx2x_save_statistics(bp);
2863         }
2864
2865         /* wait till consumers catch up with producers in all queues */
2866         bnx2x_drain_tx_queues(bp);
2867
2868         /* If this is a VF, indicate to the PF that this function is going
2869          * down (the PF will delete the sp elements and clear initializations).
2870          */
2871         if (IS_VF(bp))
2872                 bnx2x_vfpf_close_vf(bp);
2873         else if (unload_mode != UNLOAD_RECOVERY)
2874                 /* if this is a normal/close unload, we need to clean up the chip */
2875                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2876         else {
2877                 /* Send the UNLOAD_REQUEST to the MCP */
2878                 bnx2x_send_unload_req(bp, unload_mode);
2879
2880                 /*
2881                  * Prevent transactions to the host from the functions on the
2882                  * engine that doesn't reset global blocks in case of a global
2883                  * attention once global blocks are reset and gates are opened
2884                  * (the engine whose leader will perform the recovery
2885                  * last).
2886                  */
2887                 if (!CHIP_IS_E1x(bp))
2888                         bnx2x_pf_disable(bp);
2889
2890                 /* Disable HW interrupts, NAPI */
2891                 bnx2x_netif_stop(bp, 1);
2892                 /* Delete all NAPI objects */
2893                 bnx2x_del_all_napi(bp);
2894                 if (CNIC_LOADED(bp))
2895                         bnx2x_del_all_napi_cnic(bp);
2896                 /* Release IRQs */
2897                 bnx2x_free_irq(bp);
2898
2899                 /* Report UNLOAD_DONE to MCP */
2900                 bnx2x_send_unload_done(bp, false);
2901         }
2902
2903         /*
2904          * At this stage no more interrupts will arrive, so we may safely clean
2905          * the queueable objects here in case they failed to get cleaned so far.
2906          */
2907         if (IS_PF(bp))
2908                 bnx2x_squeeze_objects(bp);
2909
2910         /* There should be no more pending SP commands at this stage */
2911         bp->sp_state = 0;
2912
2913         bp->port.pmf = 0;
2914
2915         /* Free SKBs, SGEs, TPA pool and driver internals */
2916         bnx2x_free_skbs(bp);
2917         if (CNIC_LOADED(bp))
2918                 bnx2x_free_skbs_cnic(bp);
2919         for_each_rx_queue(bp, i)
2920                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2921
2922         bnx2x_free_fp_mem(bp);
2923         if (CNIC_LOADED(bp))
2924                 bnx2x_free_fp_mem_cnic(bp);
2925
2926         if (IS_PF(bp)) {
2927                 bnx2x_free_mem(bp);
2928                 if (CNIC_LOADED(bp))
2929                         bnx2x_free_mem_cnic(bp);
2930         }
2931         bp->state = BNX2X_STATE_CLOSED;
2932         bp->cnic_loaded = false;
2933
2934         /* Check if there are pending parity attentions. If there are - set
2935          * RECOVERY_IN_PROGRESS.
2936          */
2937         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2938                 bnx2x_set_reset_in_progress(bp);
2939
2940                 /* Set RESET_IS_GLOBAL if needed */
2941                 if (global)
2942                         bnx2x_set_reset_global(bp);
2943         }
2944
2945
2946         /* The last driver must disable the "close the gate" functionality
2947          * if there is no parity attention or "process kill" pending.
2948          */
2949         if (IS_PF(bp) &&
2950             !bnx2x_clear_pf_load(bp) &&
2951             bnx2x_reset_is_done(bp, BP_PATH(bp)))
2952                 bnx2x_disable_close_the_gate(bp);
2953
2954         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2955
2956         return 0;
2957 }
2958
2959 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2960 {
2961         u16 pmcsr;
2962
2963         /* If there is no power capability, silently succeed */
2964         if (!bp->pm_cap) {
2965                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2966                 return 0;
2967         }
2968
2969         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2970
2971         switch (state) {
2972         case PCI_D0:
2973                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2974                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2975                                        PCI_PM_CTRL_PME_STATUS));
2976
2977                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2978                         /* delay required during transition out of D3hot */
2979                         msleep(20);
2980                 break;
2981
2982         case PCI_D3hot:
2983                 /* If there are other clients above, don't
2984                    shut down the power */
2985                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2986                         return 0;
2987                 /* Don't shut down the power for emulation and FPGA */
2988                 if (CHIP_REV_IS_SLOW(bp))
2989                         return 0;
2990
2991                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2992                 pmcsr |= 3;
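                /* 3 is the D3hot encoding of the PCI_PM_CTRL power-state
                 * field (00 = D0, 01 = D1, 10 = D2, 11 = D3hot).
                 */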
2993
2994                 if (bp->wol)
2995                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2996
2997                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2998                                       pmcsr);
2999
3000                 /* No more memory access after this point until
3001                  * device is brought back to D0.
3002                  */
3003                 break;
3004
3005         default:
3006                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3007                 return -EINVAL;
3008         }
3009         return 0;
3010 }
3011
3012 /*
3013  * net_device service functions
3014  */
3015 int bnx2x_poll(struct napi_struct *napi, int budget)
3016 {
3017         int work_done = 0;
3018         u8 cos;
3019         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3020                                                  napi);
3021         struct bnx2x *bp = fp->bp;
3022
3023         while (1) {
3024 #ifdef BNX2X_STOP_ON_ERROR
3025                 if (unlikely(bp->panic)) {
3026                         napi_complete(napi);
3027                         return 0;
3028                 }
3029 #endif
3030
3031                 for_each_cos_in_tx_queue(fp, cos)
3032                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3033                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3034
3035                 if (bnx2x_has_rx_work(fp)) {
3036                         work_done += bnx2x_rx_int(fp, budget - work_done);
3037
3038                         /* must not complete if we consumed full budget */
3039                         if (work_done >= budget)
3040                                 break;
3041                 }
3042
3043                 /* Fall out from the NAPI loop if needed */
3044                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3045
3046                         /* No need to update SB for FCoE L2 ring as long as
3047                          * it's connected to the default SB and the SB
3048                          * has been updated when NAPI was scheduled.
3049                          */
3050                         if (IS_FCOE_FP(fp)) {
3051                                 napi_complete(napi);
3052                                 break;
3053                         }
3054                         bnx2x_update_fpsb_idx(fp);
3055                         /* bnx2x_has_rx_work() reads the status block, so
3056                          * we need to ensure that the status block indices
3057                          * have actually been read (bnx2x_update_fpsb_idx)
3058                          * before this check (bnx2x_has_rx_work). Otherwise
3059                          * we might write the "newer" value of the status
3060                          * block to the IGU: if a DMA arrived right after
3061                          * bnx2x_has_rx_work() and there were no rmb(), the
3062                          * memory read in bnx2x_update_fpsb_idx() could be
3063                          * postponed until just before bnx2x_ack_sb(). In
3064                          * that case there would be no further interrupt
3065                          * until the next status block update, even though
3066                          * there is still unhandled work.
3067                          */
3068                         rmb();
3069
3070                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3071                                 napi_complete(napi);
3072                                 /* Re-enable interrupts */
3073                                 DP(NETIF_MSG_RX_STATUS,
3074                                    "Update index to %d\n", fp->fp_hc_idx);
3075                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3076                                              le16_to_cpu(fp->fp_hc_idx),
3077                                              IGU_INT_ENABLE, 1);
3078                                 break;
3079                         }
3080                 }
3081         }
3082
3083         return work_done;
3084 }
3085
3086 /* we split the first BD into header and data BDs
3087  * to ease the pain of our fellow microcode engineers;
3088  * we use one mapping for both BDs
3089  */
3090 static u16 bnx2x_tx_split(struct bnx2x *bp,
3091                           struct bnx2x_fp_txdata *txdata,
3092                           struct sw_tx_bd *tx_buf,
3093                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3094                           u16 bd_prod)
3095 {
3096         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3097         struct eth_tx_bd *d_tx_bd;
3098         dma_addr_t mapping;
3099         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3100
3101         /* first fix first BD */
3102         h_tx_bd->nbytes = cpu_to_le16(hlen);
3103
3104         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3105            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3106
3107         /* now get a new data BD
3108          * (after the pbd) and fill it */
3109         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3110         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3111
3112         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3113                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3114
3115         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3116         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3117         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3118
3119         /* this marks the BD as one that has no individual mapping */
3120         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3121
3122         DP(NETIF_MSG_TX_QUEUED,
3123            "TSO split data size is %d (%x:%x)\n",
3124            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3125
3126         /* update tx_bd */
3127         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3128
3129         return bd_prod;
3130 }
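/* Illustrative example: for a TSO skb whose linear part is 1400 bytes with
 * hlen = 66, the start BD is trimmed to nbytes = 66 and a new data BD
 * (placed after the parsing BD) covers the remaining 1334 bytes, both BDs
 * pointing into the same DMA mapping (the data BD offset by hlen).
 */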
3131
3132 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3133 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3134 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3135 {
3136         __sum16 tsum = (__force __sum16) csum;
3137
3138         if (fix > 0)
3139                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3140                                   csum_partial(t_header - fix, fix, 0)));
3141
3142         else if (fix < 0)
3143                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3144                                   csum_partial(t_header, -fix, 0)));
3145
3146         return bswab16(tsum);
3147 }
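/* Roughly: adjust the stack-supplied checksum by the partial sum over the
 * 'fix' bytes adjacent to the transport header (subtracted when fix > 0,
 * added when fix < 0), then fold, complement and byte-swap the result for
 * the FW.
 */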
3148
3149 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3150 {
3151         u32 rc;
3152         __u8 prot = 0;
3153         __be16 protocol;
3154
3155         if (skb->ip_summed != CHECKSUM_PARTIAL)
3156                 return XMIT_PLAIN;
3157
3158         protocol = vlan_get_protocol(skb);
3159         if (protocol == htons(ETH_P_IPV6)) {
3160                 rc = XMIT_CSUM_V6;
3161                 prot = ipv6_hdr(skb)->nexthdr;
3162         } else {
3163                 rc = XMIT_CSUM_V4;
3164                 prot = ip_hdr(skb)->protocol;
3165         }
3166
3167         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3168                 if (inner_ip_hdr(skb)->version == 6) {
3169                         rc |= XMIT_CSUM_ENC_V6;
3170                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3171                                 rc |= XMIT_CSUM_TCP;
3172                 } else {
3173                         rc |= XMIT_CSUM_ENC_V4;
3174                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3175                                 rc |= XMIT_CSUM_TCP;
3176                 }
3177         }
3178         if (prot == IPPROTO_TCP)
3179                 rc |= XMIT_CSUM_TCP;
3180
3181         if (skb_is_gso_v6(skb)) {
3182                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3183                 if (rc & XMIT_CSUM_ENC)
3184                         rc |= XMIT_GSO_ENC_V6;
3185         } else if (skb_is_gso(skb)) {
3186                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3187                 if (rc & XMIT_CSUM_ENC)
3188                         rc |= XMIT_GSO_ENC_V4;
3189         }
3190
3191         return rc;
3192 }
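/* For example, a plain (non-encapsulated) IPv4/TCP TSO skb ends up here with
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4 set.
 */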
3193
3194 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3195 /* check if packet requires linearization (packet is too fragmented)
3196    no need to check fragmentation if page size > 8K (there will be no
3197    violation of FW restrictions) */
3198 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3199                              u32 xmit_type)
3200 {
3201         int to_copy = 0;
3202         int hlen = 0;
3203         int first_bd_sz = 0;
3204
3205         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3206         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3207
3208                 if (xmit_type & XMIT_GSO) {
3209                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3210                         /* Check if LSO packet needs to be copied:
3211                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3212                         int wnd_size = MAX_FETCH_BD - 3;
3213                         /* Number of windows to check */
3214                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3215                         int wnd_idx = 0;
3216                         int frag_idx = 0;
3217                         u32 wnd_sum = 0;
3218
3219                         /* Headers length */
3220                         hlen = (int)(skb_transport_header(skb) - skb->data) +
3221                                 tcp_hdrlen(skb);
3222
3223                         /* Amount of data (w/o headers) on linear part of SKB*/
3224                         first_bd_sz = skb_headlen(skb) - hlen;
3225
3226                         wnd_sum  = first_bd_sz;
3227
3228                         /* Calculate the first sum - it's special */
3229                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3230                                 wnd_sum +=
3231                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3232
3233                         /* If there was data in the linear part of the skb - check it */
3234                         if (first_bd_sz > 0) {
3235                                 if (unlikely(wnd_sum < lso_mss)) {
3236                                         to_copy = 1;
3237                                         goto exit_lbl;
3238                                 }
3239
3240                                 wnd_sum -= first_bd_sz;
3241                         }
3242
3243                         /* Others are easier: run through the frag list and
3244                            check all windows */
3245                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3246                                 wnd_sum +=
3247                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3248
3249                                 if (unlikely(wnd_sum < lso_mss)) {
3250                                         to_copy = 1;
3251                                         break;
3252                                 }
3253                                 wnd_sum -=
3254                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3255                         }
3256                 } else {
3257                         /* a non-LSO packet that is too fragmented should
3258                            always be linearized */
3259                         to_copy = 1;
3260                 }
3261         }
3262
3263 exit_lbl:
3264         if (unlikely(to_copy))
3265                 DP(NETIF_MSG_TX_QUEUED,
3266                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3267                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3268                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3269
3270         return to_copy;
3271 }
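/* Illustrative summary: with a window of MAX_FETCH_BD - 3 consecutive BDs,
 * every window over the linear data plus the frags must cover at least
 * gso_size bytes; if any window falls short, a single MSS could span too
 * many BDs for the FW, so the skb is linearized instead.
 */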
3272 #endif
3273
3274 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3275                                  u32 xmit_type)
3276 {
3277         struct ipv6hdr *ipv6;
3278
3279         *parsing_data |= (skb_shinfo(skb)->gso_size <<
3280                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3281                               ETH_TX_PARSE_BD_E2_LSO_MSS;
3282
3283         if (xmit_type & XMIT_GSO_ENC_V6)
3284                 ipv6 = inner_ipv6_hdr(skb);
3285         else if (xmit_type & XMIT_GSO_V6)
3286                 ipv6 = ipv6_hdr(skb);
3287         else
3288                 ipv6 = NULL;
3289
3290         if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3291                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3292 }
3293
3294 /**
3295  * bnx2x_set_pbd_gso - update PBD in GSO case.
3296  *
3297  * @skb:        packet skb
3298  * @pbd:        parse BD
3299  * @xmit_type:  xmit flags
3300  */
3301 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3302                               struct eth_tx_parse_bd_e1x *pbd,
3303                               u32 xmit_type)
3304 {
3305         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3306         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3307         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3308
3309         if (xmit_type & XMIT_GSO_V4) {
3310                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3311                 pbd->tcp_pseudo_csum =
3312                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3313                                                    ip_hdr(skb)->daddr,
3314                                                    0, IPPROTO_TCP, 0));
3315
3316         } else
3317                 pbd->tcp_pseudo_csum =
3318                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3319                                                  &ipv6_hdr(skb)->daddr,
3320                                                  0, IPPROTO_TCP, 0));
3321
3322         pbd->global_data |=
3323                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3324 }
3325
3326 /**
3327  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3328  *
3329  * @bp:                 driver handle
3330  * @skb:                packet skb
3331  * @parsing_data:       data to be updated
3332  * @xmit_type:          xmit flags
3333  *
3334  * 57712/578xx related, when skb has encapsulation
3335  */
3336 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3337                                  u32 *parsing_data, u32 xmit_type)
3338 {
3339         *parsing_data |=
3340                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3341                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3342                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3343
3344         if (xmit_type & XMIT_CSUM_TCP) {
3345                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3346                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3347                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3348
3349                 return skb_inner_transport_header(skb) +
3350                         inner_tcp_hdrlen(skb) - skb->data;
3351         }
3352
3353         /* We support checksum offload for TCP and UDP only.
3354          * No need to pass the UDP header length - it's a constant.
3355          */
3356         return skb_inner_transport_header(skb) +
3357                 sizeof(struct udphdr) - skb->data;
3358 }
3359
3360 /**
3361  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3362  *
3363  * @bp:                 driver handle
3364  * @skb:                packet skb
3365  * @parsing_data:       data to be updated
3366  * @xmit_type:          xmit flags
3367  *
3368  * 57712/578xx related
3369  */
3370 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3371                                 u32 *parsing_data, u32 xmit_type)
3372 {
3373         *parsing_data |=
3374                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3375                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3376                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3377
3378         if (xmit_type & XMIT_CSUM_TCP) {
3379                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3380                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3381                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3382
3383                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3384         }
3385         /* We support checksum offload for TCP and UDP only.
3386          * No need to pass the UDP header length - it's a constant.
3387          */
3388         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3389 }
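/* Example: for a standard untagged IPv4/TCP frame with no options the value
 * returned here is 14 + 20 + 20 = 54, i.e. the number of bytes from the
 * start of the frame through the end of the TCP header.
 */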
3390
3391 /* set FW indication according to inner or outer protocols if tunneled */
3392 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3393                                struct eth_tx_start_bd *tx_start_bd,
3394                                u32 xmit_type)
3395 {
3396         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3397
3398         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3399                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3400
3401         if (!(xmit_type & XMIT_CSUM_TCP))
3402                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3403 }
3404
3405 /**
3406  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3407  *
3408  * @bp:         driver handle
3409  * @skb:        packet skb
3410  * @pbd:        parse BD to be updated
3411  * @xmit_type:  xmit flags
3412  */
3413 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3414                              struct eth_tx_parse_bd_e1x *pbd,
3415                              u32 xmit_type)
3416 {
3417         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3418
3419         /* for now NS flag is not used in Linux */
3420         pbd->global_data =
3421                 cpu_to_le16(hlen |
3422                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3423                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3424
3425         pbd->ip_hlen_w = (skb_transport_header(skb) -
3426                         skb_network_header(skb)) >> 1;
3427
3428         hlen += pbd->ip_hlen_w;
3429
3430         /* We support checksum offload for TCP and UDP only */
3431         if (xmit_type & XMIT_CSUM_TCP)
3432                 hlen += tcp_hdrlen(skb) / 2;
3433         else
3434                 hlen += sizeof(struct udphdr) / 2;
3435
3436         pbd->total_hlen_w = cpu_to_le16(hlen);
3437         hlen = hlen*2;
3438
3439         if (xmit_type & XMIT_CSUM_TCP) {
3440                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3441
3442         } else {
3443                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3444
3445                 DP(NETIF_MSG_TX_QUEUED,
3446                    "hlen %d  fix %d  csum before fix %x\n",
3447                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3448
3449                 /* HW bug: fixup the CSUM */
3450                 pbd->tcp_pseudo_csum =
3451                         bnx2x_csum_fix(skb_transport_header(skb),
3452                                        SKB_CS(skb), fix);
3453
3454                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3455                    pbd->tcp_pseudo_csum);
3456         }
3457
3458         return hlen;
3459 }
3460
3461 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3462                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3463                                       struct eth_tx_parse_2nd_bd *pbd2,
3464                                       u16 *global_data,
3465                                       u32 xmit_type)
3466 {
3467         u16 inner_hlen_w = 0;
3468         u8 outerip_off, outerip_len = 0;
3469
3470         /* IP len */
3471         inner_hlen_w = (skb_inner_transport_header(skb) -
3472                         skb_inner_network_header(skb)) >> 1;
3473
3474         /* transport len */
3475         if (xmit_type & XMIT_CSUM_TCP)
3476                 inner_hlen_w += inner_tcp_hdrlen(skb) >> 1;
3477         else
3478                 inner_hlen_w += sizeof(struct udphdr) >> 1;
3479
3480         pbd2->fw_ip_hdr_to_payload_w = inner_hlen_w;
3481
3482         if (xmit_type & XMIT_CSUM_ENC_V4) {
3483                 struct iphdr *iph = inner_ip_hdr(skb);
3484
3485                 pbd2->fw_ip_csum_wo_len_flags_frag =
3486                         bswab16(csum_fold((~iph->check) -
3487                                           iph->tot_len - iph->frag_off));
3488         } else {
3489                 pbd2->fw_ip_hdr_to_payload_w =
3490                         inner_hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3491         }
3492
3493         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3494
3495         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3496
3497         if (xmit_type & XMIT_GSO_V4) {
3498                 pbd2->hw_ip_id = bswab16(ip_hdr(skb)->id);
3499
3500                 pbd_e2->data.tunnel_data.pseudo_csum =
3501                         bswab16(~csum_tcpudp_magic(
3502                                         inner_ip_hdr(skb)->saddr,
3503                                         inner_ip_hdr(skb)->daddr,
3504                                         0, IPPROTO_TCP, 0));
3505
3506                 outerip_len = ip_hdr(skb)->ihl << 1;
3507         } else {
3508                 pbd_e2->data.tunnel_data.pseudo_csum =
3509                         bswab16(~csum_ipv6_magic(
3510                                         &inner_ipv6_hdr(skb)->saddr,
3511                                         &inner_ipv6_hdr(skb)->daddr,
3512                                         0, IPPROTO_TCP, 0));
3513         }
3514
3515         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3516
3517         *global_data |=
3518                 outerip_off |
3519                 (!!(xmit_type & XMIT_CSUM_V6) <<
3520                         ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3521                 (outerip_len <<
3522                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3523                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3524                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3525 }
3526
3527 /* called with netif_tx_lock
3528  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3529  * netif_wake_queue()
3530  */
3531 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3532 {
3533         struct bnx2x *bp = netdev_priv(dev);
3534
3535         struct netdev_queue *txq;
3536         struct bnx2x_fp_txdata *txdata;
3537         struct sw_tx_bd *tx_buf;
3538         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3539         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3540         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3541         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3542         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3543         u32 pbd_e2_parsing_data = 0;
3544         u16 pkt_prod, bd_prod;
3545         int nbd, txq_index;
3546         dma_addr_t mapping;
3547         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3548         int i;
3549         u8 hlen = 0;
3550         __le16 pkt_size = 0;
3551         struct ethhdr *eth;
3552         u8 mac_type = UNICAST_ADDRESS;
3553
3554 #ifdef BNX2X_STOP_ON_ERROR
3555         if (unlikely(bp->panic))
3556                 return NETDEV_TX_BUSY;
3557 #endif
3558
3559         txq_index = skb_get_queue_mapping(skb);
3560         txq = netdev_get_tx_queue(dev, txq_index);
3561
3562         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3563
3564         txdata = &bp->bnx2x_txq[txq_index];
3565
3566         /* enable this debug print to view the transmission queue being used
3567         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3568            txq_index, fp_index, txdata_index); */
3569
3570         /* enable this debug print to view the transmission details
3571         DP(NETIF_MSG_TX_QUEUED,
3572            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3573            txdata->cid, fp_index, txdata_index, txdata, fp); */
3574
3575         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3576                         skb_shinfo(skb)->nr_frags +
3577                         BDS_PER_TX_PKT +
3578                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3579                 /* Handle special storage cases separately */
3580                 if (txdata->tx_ring_size == 0) {
3581                         struct bnx2x_eth_q_stats *q_stats =
3582                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3583                         q_stats->driver_filtered_tx_pkt++;
3584                         dev_kfree_skb(skb);
3585                         return NETDEV_TX_OK;
3586                 }
3587                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3588                 netif_tx_stop_queue(txq);
3589                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3590
3591                 return NETDEV_TX_BUSY;
3592         }
3593
3594         DP(NETIF_MSG_TX_QUEUED,
3595            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3596            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3597            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3598            skb->len);
3599
3600         eth = (struct ethhdr *)skb->data;
3601
3602         /* set flag according to packet type (UNICAST_ADDRESS is default) */
3603         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3604                 if (is_broadcast_ether_addr(eth->h_dest))
3605                         mac_type = BROADCAST_ADDRESS;
3606                 else
3607                         mac_type = MULTICAST_ADDRESS;
3608         }
3609
3610 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3611         /* First, check if we need to linearize the skb (due to FW
3612            restrictions). No need to check fragmentation if page size > 8K
3613            (there will be no violation of FW restrictions) */
3614         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3615                 /* Statistics of linearization */
3616                 bp->lin_cnt++;
3617                 if (skb_linearize(skb) != 0) {
3618                         DP(NETIF_MSG_TX_QUEUED,
3619                            "SKB linearization failed - silently dropping this SKB\n");
3620                         dev_kfree_skb_any(skb);
3621                         return NETDEV_TX_OK;
3622                 }
3623         }
3624 #endif
3625         /* Map skb linear data for DMA */
3626         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3627                                  skb_headlen(skb), DMA_TO_DEVICE);
3628         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3629                 DP(NETIF_MSG_TX_QUEUED,
3630                    "SKB mapping failed - silently dropping this SKB\n");
3631                 dev_kfree_skb_any(skb);
3632                 return NETDEV_TX_OK;
3633         }
3634         /*
3635          * Please read carefully. First we use one BD which we mark as start,
3636          * then we have a parsing info BD (used for TSO or checksum offload),
3637          * and only then we have the rest of the TSO BDs.
3638          * (Don't forget to mark the last one as last,
3639          * and to unmap only AFTER you write to the BD ...)
3640          * And above all, all PBD sizes are in words - NOT DWORDS!
3641          */
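        /* Illustrative layout for a typical TSO packet on a 57712/578xx:
         * start BD (headers) -> parsing BD (e2) [-> 2nd parsing BD when
         * tunneled] -> split data BD (rest of the linear data) -> one data
         * BD per frag; first_bd->nbd tells the FW how many BDs were used.
         */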
3642
3643         /* get current pkt produced now - advance it just before sending packet
3644          * since mapping of pages may fail and cause packet to be dropped
3645          */
3646         pkt_prod = txdata->tx_pkt_prod;
3647         bd_prod = TX_BD(txdata->tx_bd_prod);
3648
3649         /* get a tx_buf and first BD
3650          * tx_start_bd may be changed during SPLIT,
3651          * but first_bd will always stay first
3652          */
3653         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3654         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3655         first_bd = tx_start_bd;
3656
3657         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3658
3659         /* header nbd: indirectly zero other flags! */
3660         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3661
3662         /* remember the first BD of the packet */
3663         tx_buf->first_bd = txdata->tx_bd_prod;
3664         tx_buf->skb = skb;
3665         tx_buf->flags = 0;
3666
3667         DP(NETIF_MSG_TX_QUEUED,
3668            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3669            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3670
3671         if (vlan_tx_tag_present(skb)) {
3672                 tx_start_bd->vlan_or_ethertype =
3673                     cpu_to_le16(vlan_tx_tag_get(skb));
3674                 tx_start_bd->bd_flags.as_bitfield |=
3675                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3676         } else {
3677                 /* when transmitting from a VF, the start BD must hold the
3678                  * ethertype for the FW to enforce it
3679                  */
3680                 if (IS_VF(bp))
3681                         tx_start_bd->vlan_or_ethertype =
3682                                 cpu_to_le16(ntohs(eth->h_proto));
3683                 else
3684                         /* used by FW for packet accounting */
3685                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3686         }
3687
3688         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3689
3690         /* turn on parsing and get a BD */
3691         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3692
3693         if (xmit_type & XMIT_CSUM)
3694                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3695
3696         if (!CHIP_IS_E1x(bp)) {
3697                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3698                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3699
3700                 if (xmit_type & XMIT_CSUM_ENC) {
3701                         u16 global_data = 0;
3702
3703                         /* Set PBD in enc checksum offload case */
3704                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3705                                                       &pbd_e2_parsing_data,
3706                                                       xmit_type);
3707
3708                         /* turn on 2nd parsing and get a BD */
3709                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3710
3711                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3712
3713                         memset(pbd2, 0, sizeof(*pbd2));
3714
3715                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3716                                 (skb_inner_network_header(skb) -
3717                                  skb->data) >> 1;
3718
3719                         if (xmit_type & XMIT_GSO_ENC)
3720                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3721                                                           &global_data,
3722                                                           xmit_type);
3723
3724                         pbd2->global_data = cpu_to_le16(global_data);
3725
3726                         /* add an additional parsing BD indication to the start BD */
3727                         SET_FLAG(tx_start_bd->general_data,
3728                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3729                         /* set encapsulation flag in start BD */
3730                         SET_FLAG(tx_start_bd->general_data,
3731                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3732                         nbd++;
3733                 } else if (xmit_type & XMIT_CSUM) {
3734                         /* Set PBD in checksum offload case w/o encapsulation */
3735                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3736                                                      &pbd_e2_parsing_data,
3737                                                      xmit_type);
3738                 }
3739
3740                 /* Add the MACs to the parsing BD if this is a VF */
3741                 if (IS_VF(bp)) {
3742                         /* override GRE parameters in BD */
3743                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3744                                               &pbd_e2->data.mac_addr.src_mid,
3745                                               &pbd_e2->data.mac_addr.src_lo,
3746                                               eth->h_source);
3747
3748                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3749                                               &pbd_e2->data.mac_addr.dst_mid,
3750                                               &pbd_e2->data.mac_addr.dst_lo,
3751                                               eth->h_dest);
3752                 }
3753
3754                 SET_FLAG(pbd_e2_parsing_data,
3755                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3756         } else {
3757                 u16 global_data = 0;
3758                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3759                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3760                 /* Set PBD in checksum offload case */
3761                 if (xmit_type & XMIT_CSUM)
3762                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3763
3764                 SET_FLAG(global_data,
3765                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3766                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3767         }
3768
3769         /* Setup the data pointer of the first BD of the packet */
3770         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3771         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3772         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3773         pkt_size = tx_start_bd->nbytes;
3774
3775         DP(NETIF_MSG_TX_QUEUED,
3776            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3777            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3778            le16_to_cpu(tx_start_bd->nbytes),
3779            tx_start_bd->bd_flags.as_bitfield,
3780            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3781
3782         if (xmit_type & XMIT_GSO) {
3783
3784                 DP(NETIF_MSG_TX_QUEUED,
3785                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3786                    skb->len, hlen, skb_headlen(skb),
3787                    skb_shinfo(skb)->gso_size);
3788
3789                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3790
3791                 if (unlikely(skb_headlen(skb) > hlen)) {
3792                         nbd++;
3793                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3794                                                  &tx_start_bd, hlen,
3795                                                  bd_prod);
3796                 }
3797                 if (!CHIP_IS_E1x(bp))
3798                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3799                                              xmit_type);
3800                 else
3801                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3802         }
3803
3804         /* Set the PBD's parsing_data field if not zero
3805          * (for the chips newer than 57711).
3806          */
3807         if (pbd_e2_parsing_data)
3808                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3809
3810         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3811
3812         /* Handle fragmented skb */
3813         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3814                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3815
3816                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3817                                            skb_frag_size(frag), DMA_TO_DEVICE);
3818                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3819                         unsigned int pkts_compl = 0, bytes_compl = 0;
3820
3821                         DP(NETIF_MSG_TX_QUEUED,
3822                            "Unable to map page - dropping packet...\n");
3823
3824                         /* we need to unmap all buffers already mapped
3825                          * for this SKB;
3826                          * first_bd->nbd needs to be properly updated
3827                          * before the call to bnx2x_free_tx_pkt
3828                          */
3829                         first_bd->nbd = cpu_to_le16(nbd);
3830                         bnx2x_free_tx_pkt(bp, txdata,
3831                                           TX_BD(txdata->tx_pkt_prod),
3832                                           &pkts_compl, &bytes_compl);
3833                         return NETDEV_TX_OK;
3834                 }
3835
3836                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3837                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3838                 if (total_pkt_bd == NULL)
3839                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3840
3841                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3842                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3843                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3844                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3845                 nbd++;
3846
3847                 DP(NETIF_MSG_TX_QUEUED,
3848                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3849                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3850                    le16_to_cpu(tx_data_bd->nbytes));
3851         }
3852
3853         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3854
3855         /* update with actual num BDs */
3856         first_bd->nbd = cpu_to_le16(nbd);
3857
3858         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3859
3860         /* now send a tx doorbell, counting the next BD
3861          * if the packet contains or ends with it
3862          */
3863         if (TX_BD_POFF(bd_prod) < nbd)
3864                 nbd++;
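        /* (The last slot of each BD ring page is a next-page pointer rather
         * than a real descriptor; if the packet wrapped past a page boundary
         * it consumed that slot too, which appears to be what the check above
         * accounts for.)
         */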
3865
3866         /* total_pkt_bytes should be set on the first data BD if
3867          * it's not an LSO packet and there is more than one
3868          * data BD. In this case pkt_size is limited by the MTU value.
3869          * However, we prefer to set it for an LSO packet as well (while
3870          * we don't have to) in order to save some CPU cycles in the
3871          * non-LSO case, where we care much more about them.
3872          */
3873         if (total_pkt_bd != NULL)
3874                 total_pkt_bd->total_pkt_bytes = pkt_size;
3875
3876         if (pbd_e1x)
3877                 DP(NETIF_MSG_TX_QUEUED,
3878                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3879                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3880                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3881                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3882                     le16_to_cpu(pbd_e1x->total_hlen_w));
3883         if (pbd_e2)
3884                 DP(NETIF_MSG_TX_QUEUED,
3885                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3886                    pbd_e2,
3887                    pbd_e2->data.mac_addr.dst_hi,
3888                    pbd_e2->data.mac_addr.dst_mid,
3889                    pbd_e2->data.mac_addr.dst_lo,
3890                    pbd_e2->data.mac_addr.src_hi,
3891                    pbd_e2->data.mac_addr.src_mid,
3892                    pbd_e2->data.mac_addr.src_lo,
3893                    pbd_e2->parsing_data);
3894         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3895
3896         netdev_tx_sent_queue(txq, skb->len);
3897
3898         skb_tx_timestamp(skb);
3899
3900         txdata->tx_pkt_prod++;
3901         /*
3902          * Make sure that the BD data is updated before updating the producer
3903          * since FW might read the BD right after the producer is updated.
3904          * This is only applicable for weak-ordered memory model archs such
3905          * as IA-64. The following barrier is also mandatory since the FW
3906          * assumes packets must have BDs.
3907          */
3908         wmb();
3909
3910         txdata->tx_db.data.prod += nbd;
3911         barrier();
3912
3913         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3914
3915         mmiowb();
3916
3917         txdata->tx_bd_prod += nbd;
3918
3919         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3920                 netif_tx_stop_queue(txq);
3921
3922                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3923                  * ordering of set_bit() in netif_tx_stop_queue() and read of
3924                  * fp->bd_tx_cons */
3925                 smp_mb();
3926
3927                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3928                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3929                         netif_tx_wake_queue(txq);
3930         }
3931         txdata->tx_pkt++;
3932
3933         return NETDEV_TX_OK;
3934 }
3935
3936 /**
3937  * bnx2x_setup_tc - routine to configure net_device for multi tc
3938  *
3939  * @dev: net device to configure
3940  * @num_tc: number of traffic classes to enable
3941  *
3942  * callback connected to the ndo_setup_tc function pointer
3943  */
3944 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3945 {
3946         int cos, prio, count, offset;
3947         struct bnx2x *bp = netdev_priv(dev);
3948
3949         /* setup tc must be called under rtnl lock */
3950         ASSERT_RTNL();
3951
3952         /* no traffic classes requested. aborting */
3953         if (!num_tc) {
3954                 netdev_reset_tc(dev);
3955                 return 0;
3956         }
3957
3958         /* requested to support too many traffic classes */
3959         if (num_tc > bp->max_cos) {
3960                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3961                           num_tc, bp->max_cos);
3962                 return -EINVAL;
3963         }
3964
3965         /* declare amount of supported traffic classes */
3966         if (netdev_set_num_tc(dev, num_tc)) {
3967                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3968                 return -EINVAL;
3969         }
3970
3971         /* configure priority to traffic class mapping */
3972         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3973                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3974                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3975                    "mapping priority %d to tc %d\n",
3976                    prio, bp->prio_to_cos[prio]);
3977         }
3978
3979
3980         /* Use this configuration to differentiate tc0 from other COSes.
3981            This can be used for ETS or PFC, and saves the effort of setting
3982            up a multi-class queue disc or negotiating DCBX with a switch
3983         netdev_set_prio_tc_map(dev, 0, 0);
3984         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3985         for (prio = 1; prio < 16; prio++) {
3986                 netdev_set_prio_tc_map(dev, prio, 1);
3987                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3988         } */
3989
3990         /* configure traffic class to transmission queue mapping */
3991         for (cos = 0; cos < bp->max_cos; cos++) {
3992                 count = BNX2X_NUM_ETH_QUEUES(bp);
3993                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3994                 netdev_set_tc_queue(dev, cos, count, offset);
3995                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3996                    "mapping tc %d to offset %d count %d\n",
3997                    cos, offset, count);
3998         }
3999
4000         return 0;
4001 }
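/* Illustrative example: with 8 ETH queues and max_cos = 3, tc0 maps to
 * txq 0-7, tc1 to txq 8-15 and tc2 to txq 16-23, i.e. each traffic class
 * gets its own contiguous block of transmission queues.
 */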
4002
4003 /* called with rtnl_lock */
4004 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4005 {
4006         struct sockaddr *addr = p;
4007         struct bnx2x *bp = netdev_priv(dev);
4008         int rc = 0;
4009
4010         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4011                 BNX2X_ERR("Requested MAC address is not valid\n");
4012                 return -EINVAL;
4013         }
4014
4015         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4016             !is_zero_ether_addr(addr->sa_data)) {
4017                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4018                 return -EINVAL;
4019         }
4020
4021         if (netif_running(dev))  {
4022                 rc = bnx2x_set_eth_mac(bp, false);
4023                 if (rc)
4024                         return rc;
4025         }
4026
4027         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4028
4029         if (netif_running(dev))
4030                 rc = bnx2x_set_eth_mac(bp, true);
4031
4032         return rc;
4033 }
4034
4035 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4036 {
4037         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4038         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4039         u8 cos;
4040
4041         /* Common */
4042
4043         if (IS_FCOE_IDX(fp_index)) {
4044                 memset(sb, 0, sizeof(union host_hc_status_block));
4045                 fp->status_blk_mapping = 0;
4046         } else {
4047                 /* status blocks */
4048                 if (!CHIP_IS_E1x(bp))
4049                         BNX2X_PCI_FREE(sb->e2_sb,
4050                                        bnx2x_fp(bp, fp_index,
4051                                                 status_blk_mapping),
4052                                        sizeof(struct host_hc_status_block_e2));
4053                 else
4054                         BNX2X_PCI_FREE(sb->e1x_sb,
4055                                        bnx2x_fp(bp, fp_index,
4056                                                 status_blk_mapping),
4057                                        sizeof(struct host_hc_status_block_e1x));
4058         }
4059
4060         /* Rx */
4061         if (!skip_rx_queue(bp, fp_index)) {
4062                 bnx2x_free_rx_bds(fp);
4063
4064                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4065                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4066                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4067                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4068                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4069
4070                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4071                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4072                                sizeof(struct eth_fast_path_rx_cqe) *
4073                                NUM_RCQ_BD);
4074
4075                 /* SGE ring */
4076                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4077                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4078                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4079                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4080         }
4081
4082         /* Tx */
4083         if (!skip_tx_queue(bp, fp_index)) {
4084                 /* fastpath tx rings: tx_buf tx_desc */
4085                 for_each_cos_in_tx_queue(fp, cos) {
4086                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4087
4088                         DP(NETIF_MSG_IFDOWN,
4089                            "freeing tx memory of fp %d cos %d cid %d\n",
4090                            fp_index, cos, txdata->cid);
4091
4092                         BNX2X_FREE(txdata->tx_buf_ring);
4093                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4094                                 txdata->tx_desc_mapping,
4095                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4096                 }
4097         }
4098         /* end of fastpath */
4099 }
4100
4101 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4102 {
4103         int i;
4104         for_each_cnic_queue(bp, i)
4105                 bnx2x_free_fp_mem_at(bp, i);
4106 }
4107
4108 void bnx2x_free_fp_mem(struct bnx2x *bp)
4109 {
4110         int i;
4111         for_each_eth_queue(bp, i)
4112                 bnx2x_free_fp_mem_at(bp, i);
4113 }
4114
4115 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4116 {
4117         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4118         if (!CHIP_IS_E1x(bp)) {
4119                 bnx2x_fp(bp, index, sb_index_values) =
4120                         (__le16 *)status_blk.e2_sb->sb.index_values;
4121                 bnx2x_fp(bp, index, sb_running_index) =
4122                         (__le16 *)status_blk.e2_sb->sb.running_index;
4123         } else {
4124                 bnx2x_fp(bp, index, sb_index_values) =
4125                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4126                 bnx2x_fp(bp, index, sb_running_index) =
4127                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4128         }
4129 }
4130
4131 /* Returns the number of actually allocated BDs */
4132 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4133                               int rx_ring_size)
4134 {
4135         struct bnx2x *bp = fp->bp;
4136         u16 ring_prod, cqe_ring_prod;
4137         int i, failure_cnt = 0;
4138
4139         fp->rx_comp_cons = 0;
4140         cqe_ring_prod = ring_prod = 0;
4141
4142         /* This routine is called only during fp init, so
4143          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4144          */
4145         for (i = 0; i < rx_ring_size; i++) {
4146                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4147                         failure_cnt++;
4148                         continue;
4149                 }
4150                 ring_prod = NEXT_RX_IDX(ring_prod);
4151                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4152                 WARN_ON(ring_prod <= (i - failure_cnt));
4153         }
4154
4155         if (failure_cnt)
4156                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4157                           i - failure_cnt, fp->index);
4158
4159         fp->rx_bd_prod = ring_prod;
4160         /* Limit the CQE producer by the CQE ring size */
4161         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4162                                cqe_ring_prod);
4163         fp->rx_pkt = fp->rx_calls = 0;
4164
4165         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4166
4167         return i - failure_cnt;
4168 }
4169
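/* Chain the RCQ pages into a ring: the last CQE of each page is turned into
 * a next-page element pointing to the following page, with the last page
 * wrapping back to the first one.
 */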
4170 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4171 {
4172         int i;
4173
4174         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4175                 struct eth_rx_cqe_next_page *nextpg;
4176
4177                 nextpg = (struct eth_rx_cqe_next_page *)
4178                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4179                 nextpg->addr_hi =
4180                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4181                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4182                 nextpg->addr_lo =
4183                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4184                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4185         }
4186 }
4187
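/**
 * bnx2x_alloc_fp_mem_at - allocate fastpath memory for a single queue.
 *
 * @bp:         driver handle
 * @index:      fastpath index
 *
 * Allocates the status block (for non-FCoE queues), the Tx rings and the
 * Rx/CQ/SGE rings, then fills the Rx BD ring. Returns 0 on success; if the
 * rings end up smaller than requested but still meet the FW minimum, the
 * queue is kept, otherwise its memory is released and -ENOMEM is returned.
 */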
4188 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4189 {
4190         union host_hc_status_block *sb;
4191         struct bnx2x_fastpath *fp = &bp->fp[index];
4192         int ring_size = 0;
4193         u8 cos;
4194         int rx_ring_size = 0;
4195
4196         if (!bp->rx_ring_size &&
4197             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4198                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4199                 bp->rx_ring_size = rx_ring_size;
4200         } else if (!bp->rx_ring_size) {
4201                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4202
4203                 if (CHIP_IS_E3(bp)) {
4204                         u32 cfg = SHMEM_RD(bp,
4205                                            dev_info.port_hw_config[BP_PORT(bp)].
4206                                            default_cfg);
4207
4208                         /* Decrease ring size for 1G functions */
4209                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4210                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4211                                 rx_ring_size /= 10;
4212                 }
4213
4214                 /* allocate at least the number of buffers required by FW */
4215                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4216                                      MIN_RX_SIZE_TPA, rx_ring_size);
4217
4218                 bp->rx_ring_size = rx_ring_size;
4219         } else /* if rx_ring_size specified - use it */
4220                 rx_ring_size = bp->rx_ring_size;
4221
4222         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4223
4224         /* Common */
4225         sb = &bnx2x_fp(bp, index, status_blk);
4226
4227         if (!IS_FCOE_IDX(index)) {
4228                 /* status blocks */
4229                 if (!CHIP_IS_E1x(bp))
4230                         BNX2X_PCI_ALLOC(sb->e2_sb,
4231                                 &bnx2x_fp(bp, index, status_blk_mapping),
4232                                 sizeof(struct host_hc_status_block_e2));
4233                 else
4234                         BNX2X_PCI_ALLOC(sb->e1x_sb,
4235                                 &bnx2x_fp(bp, index, status_blk_mapping),
4236                                 sizeof(struct host_hc_status_block_e1x));
4237         }
4238
4239         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4240          * set shortcuts for it.
4241          */
4242         if (!IS_FCOE_IDX(index))
4243                 set_sb_shortcuts(bp, index);
4244
4245         /* Tx */
4246         if (!skip_tx_queue(bp, index)) {
4247                 /* fastpath tx rings: tx_buf tx_desc */
4248                 for_each_cos_in_tx_queue(fp, cos) {
4249                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4250
4251                         DP(NETIF_MSG_IFUP,
4252                            "allocating tx memory of fp %d cos %d\n",
4253                            index, cos);
4254
4255                         BNX2X_ALLOC(txdata->tx_buf_ring,
4256                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4257                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4258                                 &txdata->tx_desc_mapping,
4259                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4260                 }
4261         }
4262
4263         /* Rx */
4264         if (!skip_rx_queue(bp, index)) {
4265                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4266                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4267                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4268                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4269                                 &bnx2x_fp(bp, index, rx_desc_mapping),
4270                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4271
4272                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4273                                 &bnx2x_fp(bp, index, rx_comp_mapping),
4274                                 sizeof(struct eth_fast_path_rx_cqe) *
4275                                 NUM_RCQ_BD);
4276
4277                 /* SGE ring */
4278                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4279                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4280                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4281                                 &bnx2x_fp(bp, index, rx_sge_mapping),
4282                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4283                 /* RX BD ring */
4284                 bnx2x_set_next_page_rx_bd(fp);
4285
4286                 /* CQ ring */
4287                 bnx2x_set_next_page_rx_cq(fp);
4288
4289                 /* BDs */
4290                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4291                 if (ring_size < rx_ring_size)
4292                         goto alloc_mem_err;
4293         }
4294
4295         return 0;
4296
4297 /* handles low memory cases */
4298 alloc_mem_err:
4299         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4300                   index, ring_size);
4301         /* FW will drop all packets if the queue is not big enough.
4302          * In that case we disable the queue.
4303          * The minimum size differs for OOO, TPA and non-TPA queues.
4304          */
4305         if (ring_size < (fp->disable_tpa ?
4306                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4307                 /* release memory allocated for this queue */
4308                 bnx2x_free_fp_mem_at(bp, index);
4309                 return -ENOMEM;
4310         }
4311         return 0;
4312 }
4313
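/**
 * bnx2x_alloc_fp_mem_cnic - allocate fastpath memory for the CNIC queues.
 *
 * @bp:         driver handle
 *
 * Currently only the FCoE L2 queue is allocated here; an allocation failure
 * fails the load process instead of clearing FCoE support.
 */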
4314 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4315 {
4316         if (!NO_FCOE(bp))
4317                 /* FCoE */
4318                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4319                         /* we will fail the load process instead of
4320                          * marking NO_FCOE_FLAG
4321                          */
4322                         return -ENOMEM;
4323
4324         return 0;
4325 }
4326
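/**
 * bnx2x_alloc_fp_mem - allocate fastpath memory for the ethernet queues.
 *
 * @bp:         driver handle
 *
 * The leading queue must succeed. If an RSS queue fails to allocate, the
 * number of ethernet queues is reduced accordingly and the CNIC fastpaths
 * are moved next to the last remaining ethernet fastpath.
 */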
4327 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4328 {
4329         int i;
4330
4331         /* 1. Allocate FP for leading - fatal if error
4332          * 2. Allocate RSS - fix number of queues if error
4333          */
4334
4335         /* leading */
4336         if (bnx2x_alloc_fp_mem_at(bp, 0))
4337                 return -ENOMEM;
4338
4339         /* RSS */
4340         for_each_nondefault_eth_queue(bp, i)
4341                 if (bnx2x_alloc_fp_mem_at(bp, i))
4342                         break;
4343
4344         /* handle memory failures */
4345         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4346                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4347
4348                 WARN_ON(delta < 0);
4349                 bnx2x_shrink_eth_fp(bp, delta);
4350                 if (CNIC_SUPPORT(bp))
4351                         /* Move the non-eth FPs next to the last eth FP.
4352                          * This must be done in this order:
4353                          * FCOE_IDX < FWD_IDX < OOO_IDX
4354                          */
4355
4356                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4357                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4358                 bp->num_ethernet_queues -= delta;
4359                 bp->num_queues = bp->num_ethernet_queues +
4360                                  bp->num_cnic_queues;
4361                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4362                           bp->num_queues + delta, bp->num_queues);
4363         }
4364
4365         return 0;
4366 }
4367
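/* Free the per-function arrays allocated by bnx2x_alloc_mem_bp(). */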
4368 void bnx2x_free_mem_bp(struct bnx2x *bp)
4369 {
4370         int i;
4371
4372         for (i = 0; i < bp->fp_array_size; i++)
4373                 kfree(bp->fp[i].tpa_info);
4374         kfree(bp->fp);
4375         kfree(bp->sp_objs);
4376         kfree(bp->fp_stats);
4377         kfree(bp->bnx2x_txq);
4378         kfree(bp->msix_table);
4379         kfree(bp->ilt);
4380 }
4381
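/**
 * bnx2x_alloc_mem_bp - allocate the per-function bookkeeping arrays.
 *
 * @bp:         driver handle
 *
 * Allocates the fastpath, sp_objs, fp_stats, txdata, MSI-X table and ILT
 * arrays. On any failure everything allocated so far is freed and -ENOMEM
 * is returned.
 */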
4382 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4383 {
4384         struct bnx2x_fastpath *fp;
4385         struct msix_entry *tbl;
4386         struct bnx2x_ilt *ilt;
4387         int msix_table_size = 0;
4388         int fp_array_size, txq_array_size;
4389         int i;
4390
4391         /*
4392          * The biggest MSI-X table we might need is the maximum number of fast
4393          * path IGU SBs plus the default SB (for PF only).
4394          */
4395         msix_table_size = bp->igu_sb_cnt;
4396         if (IS_PF(bp))
4397                 msix_table_size++;
4398         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4399
4400         /* fp array: RSS plus CNIC related L2 queues */
4401         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4402         bp->fp_array_size = fp_array_size;
4403         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4404
4405         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4406         if (!fp)
4407                 goto alloc_err;
4408         for (i = 0; i < bp->fp_array_size; i++) {
4409                 fp[i].tpa_info =
4410                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4411                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4412                 if (!(fp[i].tpa_info))
4413                         goto alloc_err;
4414         }
4415
4416         bp->fp = fp;
4417
4418         /* allocate sp objs */
4419         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4420                               GFP_KERNEL);
4421         if (!bp->sp_objs)
4422                 goto alloc_err;
4423
4424         /* allocate fp_stats */
4425         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4426                                GFP_KERNEL);
4427         if (!bp->fp_stats)
4428                 goto alloc_err;
4429
4430         /* Allocate memory for the transmission queues array */
4431         txq_array_size =
4432                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4433         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4434
4435         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4436                                 GFP_KERNEL);
4437         if (!bp->bnx2x_txq)
4438                 goto alloc_err;
4439
4440         /* msix table */
4441         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4442         if (!tbl)
4443                 goto alloc_err;
4444         bp->msix_table = tbl;
4445
4446         /* ilt */
4447         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4448         if (!ilt)
4449                 goto alloc_err;
4450         bp->ilt = ilt;
4451
4452         return 0;
4453 alloc_err:
4454         bnx2x_free_mem_bp(bp);
4455         return -ENOMEM;
4457 }
4458
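/* Unload and reload the NIC if the interface is currently running;
 * a no-op otherwise.
 */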
4459 int bnx2x_reload_if_running(struct net_device *dev)
4460 {
4461         struct bnx2x *bp = netdev_priv(dev);
4462
4463         if (unlikely(!netif_running(dev)))
4464                 return 0;
4465
4466         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4467         return bnx2x_nic_load(bp, LOAD_NORMAL);
4468 }
4469
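/* Return the index of the currently active PHY: the internal PHY when only
 * one PHY is present, otherwise EXT_PHY1 or EXT_PHY2 depending on the link
 * state and the configured PHY selection.
 */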
4470 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4471 {
4472         u32 sel_phy_idx = 0;
4473         if (bp->link_params.num_phys <= 1)
4474                 return INT_PHY;
4475
4476         if (bp->link_vars.link_up) {
4477                 sel_phy_idx = EXT_PHY1;
4478                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4479                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4480                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4481                         sel_phy_idx = EXT_PHY2;
4482         } else {
4484                 switch (bnx2x_phy_selection(&bp->link_params)) {
4485                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4486                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4487                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4488                         sel_phy_idx = EXT_PHY1;
4489                         break;
4490                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4491                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4492                         sel_phy_idx = EXT_PHY2;
4493                         break;
4494                 }
4495         }
4496
4497         return sel_phy_idx;
4498 }
4499
4500 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4501 {
4502         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4503         /*
4504          * The selected active PHY is always the one after swapping (when PHY
4505          * swapping is enabled), so in that case we need to reverse the
4506          * configuration index.
4507          */
4508
4509         if (bp->link_params.multi_phy_config &
4510             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4511                 if (sel_phy_idx == EXT_PHY1)
4512                         sel_phy_idx = EXT_PHY2;
4513                 else if (sel_phy_idx == EXT_PHY2)
4514                         sel_phy_idx = EXT_PHY1;
4515         }
4516         return LINK_CONFIG_IDX(sel_phy_idx);
4517 }
4518
4519 #ifdef NETDEV_FCOE_WWNN
4520 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4521 {
4522         struct bnx2x *bp = netdev_priv(dev);
4523         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4524
4525         switch (type) {
4526         case NETDEV_FCOE_WWNN:
4527                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4528                                 cp->fcoe_wwn_node_name_lo);
4529                 break;
4530         case NETDEV_FCOE_WWPN:
4531                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4532                                 cp->fcoe_wwn_port_name_lo);
4533                 break;
4534         default:
4535                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4536                 return -EINVAL;
4537         }
4538
4539         return 0;
4540 }
4541 #endif
4542
4543 /* called with rtnl_lock */
4544 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4545 {
4546         struct bnx2x *bp = netdev_priv(dev);
4547
4548         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4549                 BNX2X_ERR("Can't change MTU during parity recovery\n");
4550                 return -EAGAIN;
4551         }
4552
4553         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4554             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4555                 BNX2X_ERR("Can't support requested MTU size\n");
4556                 return -EINVAL;
4557         }
4558
4559         /* This does not race with packet allocation
4560          * because the actual alloc size is
4561          * only updated as part of load
4562          */
4563         dev->mtu = new_mtu;
4564
4565         return bnx2x_reload_if_running(dev);
4566 }
4567
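/* ndo_fix_features: LRO and GRO depend on Rx checksum offload and on TPA
 * being enabled, so drop them when either dependency is not met.
 */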
4568 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4569                                      netdev_features_t features)
4570 {
4571         struct bnx2x *bp = netdev_priv(dev);
4572
4573         /* TPA requires Rx CSUM offloading */
4574         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4575                 features &= ~NETIF_F_LRO;
4576                 features &= ~NETIF_F_GRO;
4577         }
4578
4579         return features;
4580 }
4581
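/* ndo_set_features: translate the LRO/GRO/loopback feature bits into driver
 * flags and link parameters, reloading the NIC when a change requires it.
 */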
4582 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4583 {
4584         struct bnx2x *bp = netdev_priv(dev);
4585         u32 flags = bp->flags;
4586         bool bnx2x_reload = false;
4587
4588         if (features & NETIF_F_LRO)
4589                 flags |= TPA_ENABLE_FLAG;
4590         else
4591                 flags &= ~TPA_ENABLE_FLAG;
4592
4593         if (features & NETIF_F_GRO)
4594                 flags |= GRO_ENABLE_FLAG;
4595         else
4596                 flags &= ~GRO_ENABLE_FLAG;
4597
4598         if (features & NETIF_F_LOOPBACK) {
4599                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4600                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4601                         bnx2x_reload = true;
4602                 }
4603         } else {
4604                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4605                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4606                         bnx2x_reload = true;
4607                 }
4608         }
4609
4610         if (flags ^ bp->flags) {
4611                 bp->flags = flags;
4612                 bnx2x_reload = true;
4613         }
4614
4615         if (bnx2x_reload) {
4616                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4617                         return bnx2x_reload_if_running(dev);
4618                 /* else: bnx2x_nic_load() will be called at end of recovery */
4619         }
4620
4621         return 0;
4622 }
4623
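/* ndo_tx_timeout handler: defer the actual reset to the sp_rtnl task. */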
4624 void bnx2x_tx_timeout(struct net_device *dev)
4625 {
4626         struct bnx2x *bp = netdev_priv(dev);
4627
4628 #ifdef BNX2X_STOP_ON_ERROR
4629         if (!bp->panic)
4630                 bnx2x_panic();
4631 #endif
4632
4633         smp_mb__before_clear_bit();
4634         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4635         smp_mb__after_clear_bit();
4636
4637         /* This allows the netif to be shut down gracefully before resetting */
4638         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4639 }
4640
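/* PCI suspend handler: detach the netdev, unload the NIC and put the device
 * into the power state chosen by the PCI core.
 */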
4641 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4642 {
4643         struct net_device *dev = pci_get_drvdata(pdev);
4644         struct bnx2x *bp;
4645
4646         if (!dev) {
4647                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4648                 return -ENODEV;
4649         }
4650         bp = netdev_priv(dev);
4651
4652         rtnl_lock();
4653
4654         pci_save_state(pdev);
4655
4656         if (!netif_running(dev)) {
4657                 rtnl_unlock();
4658                 return 0;
4659         }
4660
4661         netif_device_detach(dev);
4662
4663         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4664
4665         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4666
4667         rtnl_unlock();
4668
4669         return 0;
4670 }
4671
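/* PCI resume handler: restore the PCI state, bring the device back to D0,
 * reattach the netdev and reload the NIC.
 */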
4672 int bnx2x_resume(struct pci_dev *pdev)
4673 {
4674         struct net_device *dev = pci_get_drvdata(pdev);
4675         struct bnx2x *bp;
4676         int rc;
4677
4678         if (!dev) {
4679                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4680                 return -ENODEV;
4681         }
4682         bp = netdev_priv(dev);
4683
4684         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4685                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4686                 return -EAGAIN;
4687         }
4688
4689         rtnl_lock();
4690
4691         pci_restore_state(pdev);
4692
4693         if (!netif_running(dev)) {
4694                 rtnl_unlock();
4695                 return 0;
4696         }
4697
4698         bnx2x_set_power_state(bp, PCI_D0);
4699         netif_device_attach(dev);
4700
4701         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4702
4703         rtnl_unlock();
4704
4705         return rc;
4706 }
4707
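/* Write the CDU validation values into the ustorm and xstorm sections of an
 * ethernet connection context.
 */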
4709 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4710                               u32 cid)
4711 {
4712         /* ustorm cxt validation */
4713         cxt->ustorm_ag_context.cdu_usage =
4714                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4715                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4716         /* xstorm cxt validation */
4717         cxt->xstorm_ag_context.cdu_reserved =
4718                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4719                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4720 }
4721
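/* Write the host coalescing timeout ticks of one status block index into
 * cstorm internal memory.
 */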
4722 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4723                                     u8 fw_sb_id, u8 sb_index,
4724                                     u8 ticks)
4725 {
4727         u32 addr = BAR_CSTRORM_INTMEM +
4728                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4729         REG_WR8(bp, addr, ticks);
4730         DP(NETIF_MSG_IFUP,
4731            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4732            port, fw_sb_id, sb_index, ticks);
4733 }
4734
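/* Set or clear the HC_ENABLED flag of one status block index in cstorm
 * internal memory, enabling or disabling host coalescing for that index.
 */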
4735 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4736                                     u16 fw_sb_id, u8 sb_index,
4737                                     u8 disable)
4738 {
4739         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4740         u32 addr = BAR_CSTRORM_INTMEM +
4741                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4742         u16 flags = REG_RD16(bp, addr);
4743         /* clear and set */
4744         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4745         flags |= enable_flag;
4746         REG_WR16(bp, addr, flags);
4747         DP(NETIF_MSG_IFUP,
4748            "port %x fw_sb_id %d sb_index %d disable %d\n",
4749            port, fw_sb_id, sb_index, disable);
4750 }
4751
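/* Program the interrupt coalescing for one status block index. The timeout
 * is given in microseconds and converted to BNX2X_BTR ticks; a zero timeout
 * (or an explicit disable) turns host coalescing off for that index.
 */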
4752 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4753                                     u8 sb_index, u8 disable, u16 usec)
4754 {
4755         int port = BP_PORT(bp);
4756         u8 ticks = usec / BNX2X_BTR;
4757
4758         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4759
4760         disable = disable ? 1 : (usec ? 0 : 1);
4761         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4762 }